/*
 * Copyright 2010-2011 INRIA Saclay
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * 91893 Orsay, France
 */

#include <assert.h>
#include <stdlib.h>

#include <isl/polynomial.h>
#include <isl/union_set.h>
#include <isl/ilp.h>
#include <isl/flow.h>
#include <isl/band.h>
#include <isl/schedule.h>
#include <isl/options.h>
#include <cloog/isl/cloog.h>

#include "cuda.h"
#include "cuda_common.h"
#include "gpucode.h"
#include "schedule.h"
#include "scoplib_isl.h"
#include "ppcg_options.h"

/* The fields stride, shift and shift_map only contain valid information
 * if shift != NULL.
 * If so, they express that the current index is such that if you add shift,
 * then the result is always a multiple of stride.
 * shift_map contains the mapping
 *
 *	i -> (i + shift)/stride
 */
struct cuda_array_bound {
	isl_int size;
	isl_qpolynomial *lb;

	isl_int stride;
	isl_qpolynomial *shift;
	isl_basic_map *shift_map;
};

struct cuda_array_info;

/* A group of array references in a kernel that should be handled together.
 * If private_bound is not NULL, then the group is mapped to registers.
 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
 * Otherwise, it is accessed from global memory.
 */
struct cuda_array_ref_group {
	/* The references in this group access this array. */
	struct cuda_array_info *array;
	/* Position of this group in the list of reference groups of array. */
	int nr;

	/* The following fields are used during the construction of the groups.
	 * access is the combined access relation relative to the shared
	 * memory tiling.
	 * write is set if any access in the group is a write.
	 */
	isl_map *access;
	int write;

	/* For each index, size and offset of piece in shared memory. */
	struct cuda_array_bound *shared_bound;

	/* For each index, size and offset of piece in private memory. */
	struct cuda_array_bound *private_bound;

	/* References in this group; point to elements of a linked list. */
	int n_ref;
	struct cuda_stmt_access **refs;
};

struct cuda_array_info {
	isl_dim *dim;
	/* Name of the array. */
	char *name;
	/* Number of indices. */
	unsigned n_index;
	/* For each index, a bound on the array in that direction. */
	isl_pw_qpolynomial_fold **bound;
	/* For each index, bound[i] specialized to the current kernel. */
	isl_pw_qpolynomial_fold **local_bound;

	/* All references to this array; point to elements of a linked list. */
	int n_ref;
	struct cuda_stmt_access **refs;

	/* The reference groups associated to this array. */
	int n_group;
	struct cuda_array_ref_group **groups;

	/* Last shared memory tile dimension that affects tile of this array. */
	int last_shared;
	/* Dimension at which copying to/from shared memory is printed.
	 * if >= 0, then the value is >= last_shared
	 * if -1, then the copying is done at the leaf level.
	 */
	int print_shared_level;
};

/* Print the name of the local copy of a given group of array references.
 */
static void print_array_name(FILE *out, struct cuda_array_ref_group *group)
{
	int global = 0;

	if (group->private_bound)
		fprintf(out, "private_");
	else if (group->shared_bound)
		fprintf(out, "shared_");
	else
		global = 1;
	fprintf(out, "%s", group->array->name);
	if (!global && group->array->n_group > 1)
		fprintf(out, "_%d", group->nr);
}

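/* As an illustration (this comment is not part of the original code):
 * for the second of several reference groups of an array A that is
 * mapped to registers, print_array_name() above prints "private_A_1".
 */
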
/* Collect all references to the given array and store pointers to them
 * in array->refs.
 */
static void collect_references(struct cuda_gen *gen,
	struct cuda_array_info *array)
{
	int i;
	int n;

	n = 0;
	for (i = 0; i < gen->n_stmts; ++i) {
		struct cuda_stmt *stmt = &gen->stmts[i];
		struct cuda_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
							isl_dim_out);
			if (name && !strcmp(array->name, name))
				n++;
		}
	}

	array->n_ref = n;
	array->refs = isl_alloc_array(gen->ctx, struct cuda_stmt_access *, n);
	assert(array->refs);

	n = 0;
	for (i = 0; i < gen->n_stmts; ++i) {
		struct cuda_stmt *stmt = &gen->stmts[i];
		struct cuda_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
							isl_dim_out);
			if (!name || strcmp(array->name, name))
				continue;

			array->refs[n++] = access;
		}
	}
}

static struct cuda_array_bound *create_bound_list(isl_ctx *ctx, int n_index)
{
	int i;
	struct cuda_array_bound *bound;

	bound = isl_alloc_array(ctx, struct cuda_array_bound, n_index);
	assert(bound);

	for (i = 0; i < n_index; ++i) {
		isl_int_init(bound[i].size);
		bound[i].lb = NULL;
		isl_int_init(bound[i].stride);
		bound[i].shift = NULL;
		bound[i].shift_map = NULL;
	}

	return bound;
}

static void free_bound_list(struct cuda_array_bound *bound, int n_index)
{
	int j;

	if (!bound)
		return;

	for (j = 0; j < n_index; ++j) {
		isl_int_clear(bound[j].size);
		isl_int_clear(bound[j].stride);
		isl_qpolynomial_free(bound[j].lb);
		isl_qpolynomial_free(bound[j].shift);
		isl_basic_map_free(bound[j].shift_map);
	}
	free(bound);
}

/* Compute bounds on the host arrays based on the accessed elements
 * and collect all references to the array.
 */
static int extract_array_info(__isl_take isl_set *array, void *user)
{
	int i;
	struct cuda_gen *gen = (struct cuda_gen *)user;
	const char *name;
	int n_index;
	isl_pw_qpolynomial_fold **bounds;
	isl_pw_qpolynomial_fold **local_bounds;

	n_index = isl_set_dim(array, isl_dim_set);
	name = isl_set_get_tuple_name(array);
	bounds = isl_alloc_array(isl_set_get_ctx(array),
				 isl_pw_qpolynomial_fold *, n_index);
	assert(bounds);
	local_bounds = isl_calloc_array(isl_set_get_ctx(array),
				 isl_pw_qpolynomial_fold *, n_index);
	assert(local_bounds);
	gen->array[gen->n_array].dim = isl_set_get_dim(array);
	gen->array[gen->n_array].name = strdup(name);
	gen->array[gen->n_array].n_index = n_index;
	gen->array[gen->n_array].bound = bounds;
	gen->array[gen->n_array].local_bound = local_bounds;

	for (i = 0; i < n_index; ++i) {
		isl_dim *dim;
		isl_qpolynomial *qp, *one;
		isl_pw_qpolynomial *pwqp;
		isl_pw_qpolynomial_fold *pwf;

		dim = isl_set_get_dim(array);
		one = isl_qpolynomial_one(isl_dim_copy(dim));
		qp = isl_qpolynomial_var(dim, isl_dim_set, i);
		qp = isl_qpolynomial_add(qp, one);
		pwqp = isl_pw_qpolynomial_alloc(isl_set_copy(array), qp);
		pwf = isl_pw_qpolynomial_bound(pwqp, isl_fold_max, NULL);
		pwf = isl_pw_qpolynomial_fold_gist(pwf,
						isl_set_copy(gen->context));

		bounds[i] = pwf;
	}

	collect_references(gen, &gen->array[gen->n_array]);

	gen->n_array++;

	isl_set_free(array);
	return 0;
}

void collect_array_info(struct cuda_gen *gen)
{
	isl_union_set *arrays;

	arrays = isl_union_map_range(isl_union_map_copy(gen->read));
	arrays = isl_union_set_union(arrays,
		isl_union_map_range(isl_union_map_copy(gen->write)));
	arrays = isl_union_set_coalesce(arrays);

	gen->n_array = isl_union_set_n_set(arrays);
	gen->array = isl_alloc_array(gen->ctx,
				     struct cuda_array_info, gen->n_array);
	assert(gen->array);
	gen->n_array = 0;
	isl_union_set_foreach_set(arrays, &extract_array_info, gen);
	isl_union_set_free(arrays);
}

static void free_array_info(struct cuda_gen *gen)
{
	int i, j;

	for (i = 0; i < gen->n_array; ++i) {
		int n_index = gen->array[i].n_index;
		free(gen->array[i].name);
		for (j = 0; j < n_index; ++j) {
			isl_pw_qpolynomial_fold_free(gen->array[i].bound[j]);
			isl_pw_qpolynomial_fold_free(gen->array[i].local_bound[j]);
		}
		isl_dim_free(gen->array[i].dim);
		free(gen->array[i].bound);
		free(gen->array[i].local_bound);
		free(gen->array[i].refs);
	}
	free(gen->array);
}

static void declare_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i)
		fprintf(gen->cuda.host_c, "%s *dev_%s;\n",
			gen->options->type, gen->array[i].name);
}

static void print_array_size(struct cuda_gen *gen, FILE *out,
	struct cuda_array_info *array)
{
	int i;
	isl_printer *prn;

	prn = isl_printer_to_file(gen->ctx, out);
	prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
	for (i = 0; i < array->n_index; ++i) {
		prn = isl_printer_print_str(prn, "(");
		prn = isl_printer_print_pw_qpolynomial_fold(prn,
							    array->bound[i]);
		prn = isl_printer_print_str(prn, ") * ");
	}
	prn = isl_printer_print_str(prn, "sizeof(");
	prn = isl_printer_print_str(prn, gen->options->type);
	prn = isl_printer_print_str(prn, ")");
	isl_printer_free(prn);
}

static void allocate_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		fprintf(gen->cuda.host_c, "cudaMalloc(&dev_%s, ",
			gen->array[i].name);
		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, ");\n");
	}
}

static void free_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i)
		fprintf(gen->cuda.host_c, "cudaFree(dev_%s);\n",
			gen->array[i].name);
}

static void copy_arrays_to_device(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		isl_dim *dim;
		isl_set *read_i;
		int empty;

		dim = isl_dim_copy(gen->array[i].dim);
		read_i = isl_union_set_extract_set(gen->copy_in, dim);
		empty = isl_set_fast_is_empty(read_i);
		isl_set_free(read_i);
		if (empty)
			continue;

		fprintf(gen->cuda.host_c, "assert(sizeof(%s) == ",
			gen->array[i].name);
		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, ");\n");
		fprintf(gen->cuda.host_c, "cudaMemcpy(dev_%s, %s, ",
			gen->array[i].name, gen->array[i].name);
		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, ", cudaMemcpyHostToDevice);\n");
	}
}

static void copy_arrays_from_device(struct cuda_gen *gen)
{
	int i;
	isl_union_set *write;
	write = isl_union_map_range(isl_union_map_copy(gen->write));

	for (i = 0; i < gen->n_array; ++i) {
		isl_dim *dim;
		isl_set *write_i;
		int empty;

		dim = isl_dim_copy(gen->array[i].dim);
		write_i = isl_union_set_extract_set(write, dim);
		empty = isl_set_fast_is_empty(write_i);
		isl_set_free(write_i);
		if (empty)
			continue;

		fprintf(gen->cuda.host_c, "cudaMemcpy(%s, dev_%s, ",
			gen->array[i].name, gen->array[i].name);
		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, ", cudaMemcpyDeviceToHost);\n");
	}

	isl_union_set_free(write);
}

static void read_sizes_from_file(struct cuda_gen *gen, const char *filename,
	int *sizes, int len)
{
	int i;
	FILE *file;

	file = fopen(filename, "r");
	if (!file)
		return;

	for (i = 0; i < len; ++i)
		if (fscanf(file, "%d", &sizes[i]) < 1)
			break;

	fclose(file);
}

static void reverse_list(int *list, int len)
{
	int i;
	int t;

	for (i = 0; 2 * i < len; ++i) {
		t = list[i];
		list[i] = list[len - 1 - i];
		list[len - 1 - i] = t;
	}
}

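/* For example (illustration, not part of the original code),
 * reverse_list() turns the list {512, 16, 32} into {32, 16, 512};
 * it is applied below to the block and grid size lists.
 */
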
/* Read user specified sizes from "tile.sizes", "block.sizes" and "grid.sizes"
 * after filling in some potentially useful defaults.
 */
static void read_sizes(struct cuda_gen *gen)
{
	int n;

	gen->tile_size = isl_alloc_array(gen->ctx, int, gen->tile_len);
	assert(gen->tile_size);
	for (n = 0; n < gen->tile_len; ++n)
		gen->tile_size[n] = gen->options->tile_size;
	read_sizes_from_file(gen, "tile.sizes", gen->tile_size, gen->tile_len);

	n = gen->n_parallel;
	gen->n_block = (n <= 3) ? n : 3;
	switch (gen->n_block) {
	case 1:
		gen->block_dim[0] = 512;
		break;
	case 2:
		gen->block_dim[0] = 32;
		gen->block_dim[1] = 16;
		break;
	default:
		gen->block_dim[0] = 32;
		gen->block_dim[1] = 4;
		gen->block_dim[2] = 4;
		break;
	}
	read_sizes_from_file(gen, "block.sizes", gen->block_dim, gen->n_block);
	reverse_list(gen->block_dim, gen->n_block);

	gen->n_grid = (n <= 2) ? n : 2;
	switch (gen->n_grid) {
	case 1:
		gen->grid_dim[0] = 65536;
		break;
	default:
		gen->grid_dim[0] = 256;
		gen->grid_dim[1] = 256;
		break;
	}
	read_sizes_from_file(gen, "grid.sizes", gen->grid_dim, gen->n_grid);
	reverse_list(gen->grid_dim, gen->n_grid);
}

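/* As an illustration (not part of the original code), a "block.sizes"
 * file containing
 *
 *	16 16
 *
 * overrides the default block dimensions chosen above.  The values are
 * whitespace-separated integers; reading stops at the first value that
 * cannot be parsed, leaving the remaining defaults in place.
 */
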
static void free_stmts(struct cuda_stmt *stmts, int n)
{
	int i;

	for (i = 0; i < n; ++i) {
		struct cuda_stmt_access *access, *next;

		for (access = stmts[i].accesses; access; access = next) {
			next = access->next;
			isl_map_free(access->access);
			free(access);
		}

		isl_set_free(stmts[i].domain);
		free(stmts[i].text);
	}
	free(stmts);
}

void clear_cuda_gen(struct cuda_gen *gen)
{
	free_stmts(gen->stmts, gen->n_stmts);
	free_array_info(gen);
	isl_set_free(gen->context);
	isl_union_set_free(gen->copy_in);
	isl_union_map_free(gen->sched);
	isl_union_map_free(gen->read);
	isl_union_map_free(gen->write);
}

static void print_reverse_list(FILE *out, int len, int *list)
{
	int i;

	for (i = 0; i < len; ++i) {
		if (i)
			fprintf(out, ", ");
		fprintf(out, "%d", list[len - 1 - i]);
	}
}

static void print_kernel_launch(struct cuda_gen *gen,
	__isl_keep isl_union_set *arrays)
{
	int i;
	int first = 1;
	unsigned nparam;
	isl_dim *dim;

	print_indent(gen->code.dst, gen->code.indent);
	fprintf(gen->code.dst, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
		gen->kernel_id, gen->kernel_id, gen->kernel_id);
	fprintf(gen->cuda.kernel_c, "__global__ void kernel%d(",
		gen->kernel_id);
	fprintf(gen->cuda.kernel_h, "__global__ void kernel%d(",
		gen->kernel_id);

	for (i = 0; i < gen->n_array; ++i) {
		isl_dim *dim;
		isl_set *arr;
		int empty;

		dim = isl_dim_copy(gen->array[i].dim);
		arr = isl_union_set_extract_set(arrays, dim);
		empty = isl_set_fast_is_empty(arr);
		isl_set_free(arr);
		if (empty)
			continue;

		if (!first) {
			fprintf(gen->code.dst, ", ");
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_h, ", ");
		}

		fprintf(gen->code.dst, "dev_%s", gen->array[i].name);
		fprintf(gen->cuda.kernel_c, "%s *%s",
			gen->options->type, gen->array[i].name);
		fprintf(gen->cuda.kernel_h, "%s *%s",
			gen->options->type, gen->array[i].name);

		first = 0;
	}

	dim = isl_union_set_get_dim(arrays);
	nparam = isl_dim_size(dim, isl_dim_param);
	for (i = 0; i < nparam; ++i) {
		const char *name = isl_dim_get_name(dim, isl_dim_param, i);
		if (!first) {
			fprintf(gen->code.dst, ", ");
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_h, ", ");
		}
		fprintf(gen->code.dst, "%s", name);
		fprintf(gen->cuda.kernel_c, "int %s", name);
		fprintf(gen->cuda.kernel_h, "int %s", name);
		first = 0;
	}
	isl_dim_free(dim);

	for (i = 0; i < gen->tile_first; ++i) {
		if (!first) {
			fprintf(gen->code.dst, ", ");
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_h, ", ");
		}
		fprintf(gen->code.dst, "h%d", i);
		fprintf(gen->cuda.kernel_c, "int h%d", i);
		fprintf(gen->cuda.kernel_h, "int h%d", i);
		first = 0;
	}

	fprintf(gen->code.dst, ");\n");
	fprintf(gen->cuda.kernel_c, ")\n");
	fprintf(gen->cuda.kernel_h, ");\n");
}

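/* As an illustration (not part of the original code), for kernel id 0,
 * a single accessed array A, one parameter n, one host iterator, and
 * gen->options->type set to "float", the code above emits
 *
 *	kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_A, n, h0);
 *
 * into the host code and
 *
 *	__global__ void kernel0(float *A, int n, int h0)
 *
 * into the kernel source (with a trailing ";" in the kernel header).
 */
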
/* Construct a map from a domain of dimensionality "len"
 * to a domain of dimensionality "len" + "tile_len" that tiles
 * the "tile_len" coordinates starting at "first".
 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
 * "dim" prescribes the parameters.
 */
static __isl_give isl_map *tile(__isl_take isl_dim *dim, int len,
	int first, int tile_len, int *tile_size)
{
	int i;
	isl_int v;
	isl_basic_map *bmap;
	isl_constraint *c;

	isl_int_init(v);

	dim = isl_dim_add(dim, isl_dim_in, len);
	dim = isl_dim_add(dim, isl_dim_out, len + tile_len);
	bmap = isl_basic_map_universe(isl_dim_copy(dim));

	for (i = 0; i < len - tile_len; ++i) {
		int j = i < first ? i : i + tile_len;
		int k = i < first ? i : i + 2 * tile_len;

		c = isl_equality_alloc(isl_dim_copy(dim));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_in, j, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out, k, v);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	for (i = 0; i < tile_len; ++i) {
		c = isl_equality_alloc(isl_dim_copy(dim));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_in, first + i, v);
		isl_int_set_si(v, tile_size[i]);
		isl_constraint_set_coefficient(c, isl_dim_out, first + i, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out,
						first + i + tile_len, v);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_dim_copy(dim));
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out,
						first + i + tile_len, v);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_dim_copy(dim));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_out,
						first + i + tile_len, v);
		isl_int_set_si(v, tile_size[i] - 1);
		isl_constraint_set_constant(c, v);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	isl_dim_free(dim);
	isl_int_clear(v);

	return isl_map_from_basic_map(bmap);
}

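/* As an illustration (not part of the original code), with len = 2,
 * first = 0, tile_len = 2 and tile_size = {32, 16}, tile() above
 * constructs the map
 *
 *	[i, j] -> [floor(i/32), floor(j/16), i mod 32, j mod 16]
 *
 * i.e., the first tile_len output dimensions enumerate the tiles and
 * the following tile_len dimensions enumerate the positions inside
 * each tile.
 */
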
/* Construct a map from a domain of dimensionality "len"
 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
 * that are projected out at the end.
 * "dim" prescribes the parameters.
 */
static __isl_give isl_map *wrap(__isl_take isl_dim *dim, int len,
	int first, int wrap_len, int *wrap_size)
{
	int i;
	isl_basic_map *bmap;
	isl_constraint *c;

	dim = isl_dim_add(dim, isl_dim_in, len);
	dim = isl_dim_add(dim, isl_dim_out, len + 2 * wrap_len);
	bmap = isl_basic_map_universe(isl_dim_copy(dim));

	for (i = 0; i < len; ++i) {
		int k = i < first + wrap_len ? i : i + 2 * wrap_len;

		c = isl_equality_alloc(isl_dim_copy(dim));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	for (i = 0; i < wrap_len; ++i) {
		c = isl_equality_alloc(isl_dim_copy(dim));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						  first + i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						  first + wrap_len + i, 1);
		isl_constraint_set_coefficient_si(c, isl_dim_out,
					first + 2 * wrap_len + i, wrap_size[i]);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_dim_copy(dim));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						  first + wrap_len + i, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_dim_copy(dim));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						  first + wrap_len + i, -1);
		isl_constraint_set_constant_si(c, wrap_size[i] - 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	isl_dim_free(dim);

	bmap = isl_basic_map_project_out(bmap, isl_dim_out,
					 first + 2 * wrap_len, wrap_len);

	return isl_map_from_basic_map(bmap);
}

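/* Illustration (not part of the original code): with len = 2, first = 0,
 * wrap_len = 2 and wrap_size = {32, 16}, wrap() constructs
 *
 *	[i, j] -> [i, j, i mod 32, j mod 16]
 *
 * The auxiliary quotient variables corresponding to [i/32, j/16] are
 * projected out before the map is returned.
 */
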
/* Add "n" parameters named prefix%d.
 */
static __isl_give isl_set *add_params(__isl_take isl_set *set,
	int n, const char *prefix)
{
	int i;
	unsigned nparam;
	char name[20];

	nparam = isl_set_dim(set, isl_dim_param);
	set = isl_set_add_dims(set, isl_dim_param, n);

	for (i = 0; i < n; ++i) {
		snprintf(name, sizeof(name), "%s%d", prefix, i);
		set = isl_set_set_dim_name(set, isl_dim_param,
					   nparam + i, name);
	}

	return set;
}

/* Equate the "n" dimensions of "set" starting at "first" to
 * freshly created parameters named prefix%d.
 */
static __isl_give isl_set *parametrize(__isl_take isl_set *set,
	int first, int n, const char *prefix)
{
	int i;
	unsigned nparam;
	isl_int v;
	isl_dim *dim;
	isl_basic_set *bset;
	isl_constraint *c;

	nparam = isl_set_dim(set, isl_dim_param);

	set = add_params(set, n, prefix);

	dim = isl_set_get_dim(set);
	bset = isl_basic_set_universe(isl_dim_copy(dim));

	isl_int_init(v);

	for (i = 0; i < n; ++i) {
		c = isl_equality_alloc(isl_dim_copy(dim));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_set, first + i, v);
		bset = isl_basic_set_add_constraint(bset, c);
	}

	isl_int_clear(v);
	isl_dim_free(dim);

	return isl_set_intersect(set, isl_set_from_basic_set(bset));
}

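/* Illustration (not part of the original code): parametrize(set, 1, 2, "p")
 * on a 3-dimensional set { [a, b, c] } adds parameters p0 and p1 and
 * intersects the set with { [a, b, c] : b = p0 and c = p1 }.
 */
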
static __isl_give isl_set *parametrization(__isl_take isl_dim *dim,
	int len, int first, int n, const char *prefix)
{
	isl_set *set;

	dim = isl_dim_add(dim, isl_dim_set, len);
	set = isl_set_universe(dim);

	return parametrize(set, first, n, prefix);
}

/* Tile the B loops over the tile sizes and then tile/wrap
 * the T1 loops over the blocks.
 */
static __isl_give isl_union_map *tile_schedule(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_dim *dim;
	isl_map *tiling, *block_tiling;

	dim = isl_union_map_get_dim(sched);
	tiling = tile(isl_dim_copy(dim), gen->untiled_len,
		      gen->tile_first, gen->tile_len, gen->tile_size);

	if (gen->options->wrap)
		block_tiling = wrap(dim, gen->untiled_len + gen->tile_len,
				gen->tile_first, gen->n_grid, gen->grid_dim);
	else
		block_tiling = tile(dim, gen->untiled_len + gen->tile_len,
				gen->tile_first, gen->n_grid, gen->grid_dim);

	gen->tiled_len = gen->untiled_len + gen->tile_len + gen->n_grid;

	tiling = isl_map_apply_range(tiling, block_tiling);

	sched = isl_union_map_apply_range(sched,
					  isl_union_map_from_map(tiling));

	gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;

	return sched;
}

static __isl_give isl_union_map *parametrize_tiled_schedule(
	struct cuda_gen *gen, __isl_take isl_union_map *sched)
{
	isl_dim *dim;
	isl_set *par;

	dim = isl_union_map_get_dim(sched);
	par = parametrization(dim, gen->tiled_len, 0, gen->tile_first, "h");
	sched = isl_union_map_intersect_range(sched,
						isl_union_set_from_set(par));

	dim = isl_union_map_get_dim(sched);
	par = parametrization(dim, gen->tiled_len,
		gen->tile_first + gen->n_grid, gen->n_grid, "b");
	sched = isl_union_map_intersect_range(sched,
						isl_union_set_from_set(par));

	return sched;
}

/* Tile/wrap the P1 loops over the threads.
 */
static __isl_give isl_union_map *thread_tile_schedule(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_dim *dim;
	isl_map *tiling;
	isl_set *par;

	dim = isl_union_map_get_dim(sched);

	if (gen->options->wrap)
		tiling = wrap(isl_dim_copy(dim), gen->tiled_len,
				gen->shared_len, gen->n_block, gen->block_dim);
	else
		tiling = tile(isl_dim_copy(dim), gen->tiled_len,
				gen->shared_len, gen->n_block, gen->block_dim);
	gen->thread_tiled_len = gen->tiled_len + gen->n_block;

	sched = isl_union_map_apply_range(sched,
					  isl_union_map_from_map(tiling));

	par = parametrization(dim, gen->thread_tiled_len,
		gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
		gen->n_block, "t");
	sched = isl_union_map_intersect_range(sched,
						isl_union_set_from_set(par));

	gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;

	return sched;
}

/* If the user asked for it, scale the shared memory tile loops
 * (T1P and T2) of "sched" by gen->tile_size[i].
 * If we are not performing "wrapping", then additionally scale the T1P
 * loops by gen->grid_dim[i].
 */
static __isl_give isl_union_map *scale_tile_loops(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i;
	isl_dim *dim;
	isl_basic_map *scale;
	isl_constraint *c;

	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_dim(sched);
	dim = isl_dim_add(dim, isl_dim_in, gen->tiled_len);
	dim = isl_dim_add(dim, isl_dim_out, gen->tiled_len);
	scale = isl_basic_map_universe(isl_dim_copy(dim));

	for (i = 0; i < gen->tiled_len; ++i) {
		int f = 1;

		if (i >= gen->tile_first && i < gen->tile_first + gen->n_grid) {
			f = gen->tile_size[i - gen->tile_first];
			if (!gen->options->wrap)
				f *= gen->grid_dim[i - gen->tile_first];
		} else if (i >= gen->tile_first + gen->n_grid &&
			   i < gen->tile_first + gen->n_grid + gen->tile_len) {
			f = gen->tile_size[i - (gen->tile_first + gen->n_grid)];
		}

		c = isl_equality_alloc(isl_dim_copy(dim));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_dim_free(dim);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}

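/* Illustration (not part of the original code): with tile_size[0] = 32,
 * grid_dim[0] = 256 and wrapping disabled, the first T1P loop iterator t
 * is replaced by 8192 * t above, so that consecutive iterations of the
 * scaled loop are 32 * 256 apart.
 */
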
/* If we are not performing "wrapping" and if the user asked for it,
 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
 */
static __isl_give isl_union_map *scale_thread_tile_loops(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i;
	isl_dim *dim;
	isl_basic_map *scale;
	isl_constraint *c;

	if (gen->options->wrap)
		return sched;
	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_dim(sched);
	dim = isl_dim_add(dim, isl_dim_in, gen->thread_tiled_len);
	dim = isl_dim_add(dim, isl_dim_out, gen->thread_tiled_len);
	scale = isl_basic_map_universe(isl_dim_copy(dim));

	for (i = 0; i < gen->thread_tiled_len; ++i) {
		int f = 1;

		if (i >= gen->shared_len &&
		    i < gen->shared_len + gen->n_block)
			f = gen->block_dim[i - gen->shared_len];

		c = isl_equality_alloc(isl_dim_copy(dim));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_dim_free(dim);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}

/* If we are not performing "wrapping" and if the user asked for it,
 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
 */
static __isl_give isl_union_map *scale_access_tile_loops(struct cuda_gen *gen,
	__isl_take isl_union_map *sched, int len, int first, int n_tile)
{
	int i;
	isl_dim *dim;
	isl_basic_map *scale;
	isl_constraint *c;

	if (gen->options->wrap)
		return sched;
	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_dim(sched);
	dim = isl_dim_add(dim, isl_dim_in, len);
	dim = isl_dim_add(dim, isl_dim_out, len);
	scale = isl_basic_map_universe(isl_dim_copy(dim));

	for (i = 0; i < len; ++i) {
		int f = 1;

		if (i >= first && i < first + n_tile)
			f = gen->block_dim[i - first];

		c = isl_equality_alloc(isl_dim_copy(dim));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_dim_free(dim);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}

/* If print_user_stmt is set, we want to print the statements ourselves,
 * instead of relying on the C preprocessor. If so, we need to use
 * the stop option so that the domains will be saved on the statement
 * nodes.
 */
static void print_cloog_shared_body(struct cuda_gen *gen,
	__isl_keep isl_set *context, __isl_keep isl_union_map *sched, int len,
	void (*print_user_stmt)(struct gpucode_info *info,
				struct clast_user_stmt *s),
	int first_unroll)
{
	int i;
	CloogOptions *options;
	CloogDomain *cloog_context;
	CloogUnionDomain *ud;
	CloogInput *input;
	struct clast_stmt *stmt;
	char name[20];

	sched = isl_union_map_copy(sched);
	sched = isl_union_map_align_params(sched, isl_set_get_dim(context));

	options = cloog_options_malloc(gen->state);
	options->language = LANGUAGE_C;
	options->strides = 1;
	options->sh = 1;
	options->f = len;
	options->l = -1;
	options->override = 1;
	options->save_domains = 1;
	options->noscalars = 1;
	options->first_unroll = first_unroll;

	ud = cloog_union_domain_from_isl_union_map(sched);
	for (i = 0; i < len; ++i) {
		snprintf(name, sizeof(name), "c%d", i);
		ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
	}
	cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
	input = cloog_input_alloc(cloog_context, ud);

	stmt = cloog_clast_create_from_input(input, options);

	gen->stmt_code.indent = gen->kernel_code.indent;
	gen->stmt_code.dst = gen->cuda.kernel_c;
	gen->stmt_code.print_user_stmt = print_user_stmt;
	gen->stmt_code.print_user_stmt_list = NULL;
	gen->stmt_code.print_for_head = NULL;
	gen->stmt_code.print_for_foot = NULL;
	gen->stmt_code.user = gen;
	gpu_print_host_stmt(&gen->stmt_code, stmt);

	cloog_clast_free(stmt);
	cloog_options_free(options);
}

/* Add "len" parameters p[i] called prefix%d,
 * with bounds 0 <= p[i] < size[i].
 */
__isl_give isl_set *add_bounded_parameters(__isl_take isl_set *set,
	int len, int *size, const char *prefix)
{
	int i;
	unsigned nparam;
	isl_int v;
	isl_dim *dim;
	isl_basic_set *bset;
	isl_constraint *c;
	char name[20];

	nparam = isl_set_dim(set, isl_dim_param);
	set = isl_set_add_dims(set, isl_dim_param, len);

	for (i = 0; i < len; ++i) {
		snprintf(name, sizeof(name), "%s%d", prefix, i);
		set = isl_set_set_dim_name(set, isl_dim_param,
					   nparam + i, name);
	}

	dim = isl_set_get_dim(set);
	bset = isl_basic_set_universe(isl_dim_copy(dim));

	isl_int_init(v);

	for (i = 0; i < len; ++i) {
		c = isl_inequality_alloc(isl_dim_copy(dim));
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
		bset = isl_basic_set_add_constraint(bset, c);

		c = isl_inequality_alloc(isl_dim_copy(dim));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
		isl_int_set_si(v, size[i] - 1);
		isl_constraint_set_constant(c, v);
		bset = isl_basic_set_add_constraint(bset, c);
	}

	isl_int_clear(v);
	isl_dim_free(dim);

	return isl_set_intersect(set, isl_set_from_basic_set(bset));
}

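/* For example (illustration, not part of the original code),
 * add_bounded_parameters(set, 2, {32, 16}, "t") adds parameters t0 and t1
 * with constraints 0 <= t0 <= 31 and 0 <= t1 <= 15; below, this is used
 * to model the thread identifiers within a block.
 */
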
static void print_shared_body(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched,
	int len, void (*print_user_stmt)(struct gpucode_info *info,
					 struct clast_user_stmt *s),
	int first_unroll)
{
	isl_set *context;

	context = isl_set_copy(shared_domain);
	context = parametrize(context, 0, gen->shared_len, "g");
	context = isl_set_project_out(context, isl_dim_set, 0, gen->shared_len);
	context = add_bounded_parameters(context,
					 gen->n_block, gen->block_dim, "t");

	print_cloog_shared_body(gen, context, sched, len, print_user_stmt,
				first_unroll);

	isl_set_free(context);
}

/* Given a tile of an array, construct a map that maps each element
 * of the tile to a copy of the tile shifted to the origin
 * (based on the lower bounds in group->private_bound or group->shared_bound).
 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
 * is applied to the index first.
 * The domain of the resulting map is "access",
 * while the range space is anonymous.
 */
static __isl_give isl_map *shift_access(__isl_take isl_set *access,
	struct cuda_array_ref_group *group)
{
	int i;
	isl_dim *dim;
	isl_basic_set *bset;
	isl_basic_map *bmap;
	isl_qpolynomial *lb;
	isl_basic_set *offset;
	isl_basic_map *shift;
	isl_basic_map *pre_shift;
	isl_map *sched;
	const char *name;
	struct cuda_array_bound *bounds;
	int n_index = group->array->n_index;

	bounds = group->private_bound;
	if (!bounds)
		bounds = group->shared_bound;

	dim = isl_set_get_dim(access);
	dim = isl_dim_drop(dim, isl_dim_set, 0, n_index);
	offset = isl_basic_set_universe(dim);
	for (i = 0; i < n_index; ++i) {
		lb = isl_qpolynomial_copy(bounds[i].lb);
		bmap = isl_basic_map_from_qpolynomial(lb);
		bset = isl_basic_map_range(bmap);
		offset = isl_basic_set_flat_product(offset, bset);
	}
	offset = isl_basic_set_neg(offset);

	dim = isl_dim_map_from_set(isl_set_get_dim(access));
	shift = isl_basic_map_identity(dim);
	shift = isl_basic_map_set_tuple_name(shift, isl_dim_out, NULL);

	bset = isl_basic_set_universe(isl_set_get_dim(access));
	bmap = isl_basic_map_from_domain_and_range(bset, offset);

	shift = isl_basic_map_sum(shift, bmap);

	dim = isl_set_get_dim(access);
	dim = isl_dim_drop(dim, isl_dim_set, 0, n_index);
	dim = isl_dim_map_from_set(dim);
	pre_shift = isl_basic_map_universe(isl_dim_copy(dim));
	dim = isl_dim_add(dim, isl_dim_in, 1);
	dim = isl_dim_add(dim, isl_dim_out, 1);
	for (i = 0; i < n_index; ++i) {
		if (!bounds[i].shift_map)
			bmap = isl_basic_map_identity(isl_dim_copy(dim));
		else
			bmap = isl_basic_map_copy(bounds[i].shift_map);
		pre_shift = isl_basic_map_flat_product(pre_shift, bmap);
	}
	isl_dim_free(dim);
	name = isl_basic_map_get_tuple_name(shift, isl_dim_in);
	pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_in, name);
	pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_out, name);
	shift = isl_basic_map_apply_range(pre_shift, shift);

	sched = isl_map_from_basic_map(shift);
	sched = isl_map_intersect_domain(sched, access);

	return sched;
}

/* Construct a schedule for iterating over all elements in the given
 * piece of an array. The schedule iterates over a copy of the piece
 * that is shifted to the origin.
 * We subsequently also perform the tiling/wrapping over the threads.
 *
 * In particular, we tile the final iterators so that the final thread
 * dimension runs over the final array dimension.
 * However, if those final iterators have only a single iteration,
 * we try to tile earlier iterators instead.
 */
static __isl_give isl_union_map *access_schedule(struct cuda_gen *gen,
	__isl_take isl_set *access, struct cuda_array_ref_group *group)
{
	isl_dim *dim;
	isl_map *sched;
	isl_union_map *usched;
	isl_map *tiling;
	isl_set *par;
	unsigned nvar = isl_set_dim(access, isl_dim_set);
	int n_tile;
	int first;

	sched = shift_access(access, group);

	n_tile = gen->n_block;
	if (n_tile > nvar) {
		int i;
		sched = isl_map_insert(sched, isl_dim_out, 0, n_tile - nvar);
		for (i = 0; i < n_tile - nvar; ++i)
			sched = isl_map_fix_si(sched, isl_dim_out, i, 0);
		nvar = n_tile;
	}

	first = nvar - n_tile;

	for (; first > 0; first--)
		if (!isl_map_plain_is_fixed(sched, isl_dim_out,
					    first + n_tile - 1, NULL))
			break;

	dim = isl_map_get_dim(sched);
	dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
	dim = isl_dim_drop(dim, isl_dim_out, 0, nvar);
	if (gen->options->wrap)
		tiling = wrap(isl_dim_copy(dim), nvar, first,
				n_tile, gen->block_dim);
	else
		tiling = tile(isl_dim_copy(dim), nvar, first,
				n_tile, gen->block_dim);
	sched = isl_map_apply_range(sched, tiling);

	par = parametrization(dim, nvar + n_tile, first + n_tile, n_tile, "t");
	usched = isl_union_map_from_map(sched);
	usched = isl_union_map_intersect_range(usched,
						isl_union_set_from_set(par));

	usched = scale_access_tile_loops(gen, usched, nvar + n_tile,
					 first, n_tile);

	return usched;
}

static void print_shared_access(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_take isl_set *access,
	const char *type, struct cuda_array_ref_group *group)
{
	const char *array_name;
	char *name;
	isl_ctx *ctx;
	isl_union_map *sched;
	unsigned nvar = isl_set_dim(access, isl_dim_set);
	int n_tile;

	ctx = isl_set_get_ctx(access);
	array_name = isl_set_get_tuple_name(access);
	name = isl_alloc_array(ctx, char,
		strlen(type) + sizeof("_shared_") + strlen(array_name) + 20);
	if (group->array->n_group > 1)
		sprintf(name, "%s_shared_%s_%d", type, array_name, group->nr);
	else
		sprintf(name, "%s_shared_%s", type, array_name);
	access = isl_set_set_tuple_name(access, name);
	free(name);

	sched = access_schedule(gen, access, group);

	n_tile = gen->n_block;
	if (n_tile > nvar)
		n_tile = nvar;

	print_shared_body(gen, shared_domain, sched, nvar + n_tile, NULL, -1);

	isl_union_map_free(sched);
}

/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
static __isl_give isl_union_map *group_access_relation(
	struct cuda_array_ref_group *group, int read, int write)
{
	int i;
	isl_union_map *access;

	access = isl_union_map_empty(isl_map_get_dim(group->access));
	for (i = 0; i < group->n_ref; ++i) {
		isl_map *map_i;

		if (!((read && group->refs[i]->read) ||
		      (write && group->refs[i]->write)))
			continue;
		map_i = isl_map_copy(group->refs[i]->access);
		access = isl_union_map_union(access,
					isl_union_map_from_map(map_i));
	}

	return access;
}

/* Print code for reading into or writing from shared memory
 * the given array reference group.
 *
 * sched maps the original iteration domains to the shared memory tile loops.
 */
static int print_group_shared_accesses(struct cuda_gen *gen,
	struct cuda_array_ref_group *group, const char *type,
	__isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched)
{
	int read;
	isl_union_map *access;
	isl_union_set *uset;
	isl_set *access_set;

	if (group->private_bound)
		return 0;
	if (!group->shared_bound)
		return 0;

	read = !strcmp(type, "read");

	access = group_access_relation(group, read, !read);
	access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
	uset = isl_union_map_range(access);

	if (isl_union_set_is_empty(uset)) {
		isl_union_set_free(uset);
		return 0;
	}

	access_set = isl_union_set_copy_set(uset);
	isl_union_set_free(uset);
	access_set = isl_set_coalesce(access_set);

	print_shared_access(gen, shared_domain, access_set, type, group);

	return 1;
}

/* Print code for reading into or writing from shared memory at
 * the given level (-1 for innermost).
 *
 * If we are not printing at the innermost level, then the dimensionality
 * of shared_domain may be smaller than gen->shared_len.
 * As the rest of the code assumes that the domain of access has
 * gen->shared_len dimensions, we therefore may need to embed this domain
 * in a higher dimensional space after intersection with shared_domain.
 */
static void print_shared_accesses(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
	const char *type, int level)
{
	int i, j;
	isl_dim *dim;
	isl_map *proj;
	isl_set *par;
	int shared_len = isl_set_dim(shared_domain, isl_dim_set);
	int sync = 0;
	isl_union_map *sched;

	shared_domain = isl_set_copy(shared_domain);
	sched = isl_union_map_copy(gen->tiled_sched);
	dim = isl_union_map_get_dim(sched);
	proj = projection(dim, gen->tiled_len, shared_len);
	sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
	sched = isl_union_map_intersect_range(sched,
		isl_union_set_from_set(isl_set_copy(shared_domain)));
	if (shared_len != gen->shared_len) {
		dim = isl_union_map_get_dim(sched);
		proj = projection(dim, gen->shared_len, shared_len);
		proj = isl_map_reverse(proj);
		shared_domain = isl_set_apply(shared_domain,
						isl_map_copy(proj));
		sched = isl_union_map_apply_range(sched,
						isl_union_map_from_map(proj));
	}

	dim = isl_union_map_get_dim(sched);
	par = parametrization(dim, gen->shared_len, 0, gen->shared_len, "g");
	sched = isl_union_map_intersect_range(sched,
						isl_union_set_from_set(par));

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		if (gen->array[i].print_shared_level != level)
			continue;

		for (j = 0; j < array->n_group; ++j) {
			if (print_group_shared_accesses(gen, array->groups[j],
					type, shared_domain, sched))
				sync = 1;
		}
	}

	isl_union_map_free(sched);
	isl_set_free(shared_domain);

	if (sync) {
		print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
		fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
	}
}

/* Given an index expression into a tile of an array, adjust the expression
 * to a shift of the tile to the origin
 * (based on the lower bounds in array->shared_bound).
 * If the index is strided, then we first add
 * bound->shift and divide by bound->stride.
 */
static __isl_give isl_qpolynomial *shift_index(__isl_take isl_qpolynomial *qp,
	struct cuda_array_info *array,
	struct cuda_array_bound *bound, __isl_take isl_set *domain)
{
	isl_qpolynomial *lb;

	if (bound->shift) {
		isl_qpolynomial *shift, *t;
		isl_int one;
		isl_dim *dim;
		shift = bound->shift;
		shift = isl_qpolynomial_copy(shift);
		shift = isl_qpolynomial_drop_dims(shift, isl_dim_set, 0,
			    isl_qpolynomial_dim(shift, isl_dim_set));
		shift = isl_qpolynomial_align_params(shift,
			    isl_qpolynomial_get_dim(qp));
		qp = isl_qpolynomial_add(qp, shift);
		dim = isl_qpolynomial_get_dim(qp);
		isl_int_init(one);
		isl_int_set_si(one, 1);
		t = isl_qpolynomial_rat_cst(dim, one, bound->stride);
		isl_int_clear(one);
		qp = isl_qpolynomial_mul(qp, t);
	}

	lb = isl_qpolynomial_copy(bound->lb);
	lb = isl_qpolynomial_drop_dims(lb, isl_dim_set, 0,
			isl_qpolynomial_dim(lb, isl_dim_set));

	lb = isl_qpolynomial_align_params(lb, isl_qpolynomial_get_dim(qp));

	qp = isl_qpolynomial_sub(qp, lb);
	qp = isl_qpolynomial_gist(qp, domain);

	return qp;
}

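/* As an illustration (not part of the original code), for a bound with
 * shift = 1, stride = 2 and lb = 2 * g (with g a tile loop parameter),
 * an index expression i is rewritten by shift_index() above to
 *
 *	(i + 1)/2 - 2 * g
 *
 * i.e., the stride is divided out first and the lower bound of the
 * tile is then subtracted.
 */
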
/* This function is called for each access to an array in some statement
 * in the original code.
 * Replace that access by an access to shared or (linearized) global memory.
 * Since the array in shared memory is just
 * a shifted copy of part of the original array, we simply need
 * to subtract the lower bound, which was computed
 * in can_tile_for_shared_memory.
 * If any of the indices is strided, then we first add
 * shared_bound[i].shift and divide by shared_bound[i].stride.
 *
 * If the given array is accessed directly from global memory,
 * we don't need to perform any shifting and simply simplify
 * the expression in the context of the domain instead.
 *
 * If the array space (range of access) has no name, then we are
 * accessing an iterator in the original program.
 */
static void print_access(struct cuda_gen *gen, __isl_take isl_map *access,
	int group_nr)
{
	int i;
	const char *name;
	unsigned n_index;
	struct cuda_array_info *array = NULL;
	isl_printer *prn;
	isl_basic_set *aff;
	isl_set *data_set;
	isl_set *domain;
	struct cuda_array_bound *bounds = NULL;

	access = isl_map_align_params(access,
					isl_set_get_dim(gen->stmt_domain));

	data_set = isl_set_apply(isl_set_copy(gen->stmt_domain), access);

	name = isl_set_get_tuple_name(data_set);

	if (!name)
		fprintf(gen->cuda.kernel_c, "(");
	else {
		struct cuda_array_ref_group *group;

		for (i = 0; i < gen->n_array; ++i) {
			if (strcmp(name, gen->array[i].name))
				continue;
			array = &gen->array[i];
		}
		assert(array);
		group = array->groups[group_nr];
		bounds = group->private_bound;
		if (!bounds)
			bounds = group->shared_bound;

		print_array_name(gen->cuda.kernel_c, group);
		fprintf(gen->cuda.kernel_c, "[");
	}

	n_index = isl_set_dim(data_set, isl_dim_set);
	aff = isl_set_affine_hull(data_set);

	prn = isl_printer_to_file(gen->ctx, gen->cuda.kernel_c);
	prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);

	if (!bounds)
		for (i = 0; i + 1 < n_index; ++i)
			prn = isl_printer_print_str(prn, "(");

	for (i = 0; i < n_index; ++i) {
		isl_constraint *c;
		isl_qpolynomial *qp;
		int ok;

		ok = isl_basic_set_has_defining_equality(aff,
							isl_dim_out, i, &c);
		assert(ok);
		qp = isl_qpolynomial_from_constraint(c, isl_dim_out, i);
		qp = isl_qpolynomial_drop_dims(qp, isl_dim_set, 0,
				isl_qpolynomial_dim(qp, isl_dim_set));

		if (!array) {
			prn = isl_printer_print_qpolynomial(prn, qp);
			isl_qpolynomial_free(qp);
			continue;
		}

		domain = isl_set_copy(gen->stmt_domain);
		domain = isl_set_project_out(domain, isl_dim_set, 0,
					isl_set_dim(domain, isl_dim_set));
		if (!bounds)
			qp = isl_qpolynomial_gist(qp, domain);
		else
			qp = shift_index(qp, array, &bounds[i], domain);

		if (i) {
			if (!bounds) {
				prn = isl_printer_print_str(prn, ") * (");
				prn = isl_printer_print_pw_qpolynomial_fold(prn,
							array->local_bound[i]);
				prn = isl_printer_print_str(prn, ") + ");
			} else
				prn = isl_printer_print_str(prn, "][");
		}
		prn = isl_printer_print_qpolynomial(prn, qp);
		isl_qpolynomial_free(qp);
	}
	if (!name)
		prn = isl_printer_print_str(prn, ")");
	else
		prn = isl_printer_print_str(prn, "]");
	isl_printer_free(prn);

	isl_basic_set_free(aff);
}

static void print_stmt_body(struct cuda_gen *gen,
	FILE *out, struct cuda_stmt *stmt)
{
	int last = 0;
	struct cuda_stmt_access *access;

	for (access = stmt->accesses; access; access = access->next) {
		fwrite(stmt->text + last, 1, access->text_offset - last, out);
		last = access->text_offset + access->text_len;

		print_access(gen, isl_map_copy(access->access),
			     access->group);
	}

	fprintf(out, "%s\n", stmt->text + last);
}

/* This function is called for each leaf in the innermost clast,
 * i.e., for each statement.
 * We print the statement body, simplifying the accesses based
 * on the schedule.
 */
static void print_statement(struct gpucode_info *code,
	struct clast_user_stmt *u)
{
	struct cuda_gen *gen = code->user;
	isl_dim *dim;
	isl_set *par;
	isl_set *stmt_domain;
	isl_union_map *stmt_sched;
	isl_union_set *uset;
	int nr;
	struct cuda_stmt *stmt;

	nr = atoi(u->statement->name + 2);
	stmt = &gen->stmts[nr];

	stmt_domain = extract_host_domain(u);

	stmt_sched = isl_union_map_intersect_range(
		    isl_union_map_copy(gen->local_sched),
		    isl_union_set_from_set(extend(stmt_domain,
						  gen->thread_tiled_len)));
	dim = isl_union_map_get_dim(stmt_sched);
	par = parametrization(dim, gen->thread_tiled_len, 0,
				gen->thread_tiled_len, "c");
	stmt_sched = isl_union_map_intersect_range(stmt_sched,
						isl_union_set_from_set(par));

	uset = isl_union_map_domain(stmt_sched);
	dim = isl_union_set_get_dim(uset);
	dim = isl_dim_add(dim, isl_dim_set,
			  isl_set_dim(stmt->domain, isl_dim_set));
	dim = isl_dim_set_tuple_name(dim, isl_dim_set, u->statement->name);
	gen->stmt_domain = isl_union_set_extract_set(uset, dim);
	isl_union_set_free(uset);

	print_indent(code->dst, code->indent);
	print_stmt_body(gen, code->dst, stmt);

	isl_set_free(gen->stmt_domain);
}

/* Print an access to the element in the global memory copy of the
 * given array that corresponds to element [qp[0]][qp[1]]...
 * of the original array.
 * The copy in global memory has been linearized, so we need to take
 * the array size into account.
 */
static void print_private_global_index(isl_ctx *ctx, FILE *out,
	struct cuda_array_info *array, __isl_keep isl_qpolynomial **qp)
{
	int i;
	isl_printer *prn;

	fprintf(out, "%s[", array->name);
	prn = isl_printer_to_file(ctx, out);
	prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
	for (i = 0; i + 1 < array->n_index; ++i)
		prn = isl_printer_print_str(prn, "(");
	for (i = 0; i < array->n_index; ++i) {
		if (i) {
			prn = isl_printer_print_str(prn, ") * (");
			prn = isl_printer_print_pw_qpolynomial_fold(prn,
							array->local_bound[i]);
			prn = isl_printer_print_str(prn, ") + ");
		}
		prn = isl_printer_print_qpolynomial(prn, qp[i]);
	}
	isl_printer_free(prn);
	fprintf(out, "]");
}

/* Print an access to the element in the private memory copy of the
 * given array reference group that corresponds to element [qps[0]][qps[1]]...
 * of the original array.
 * Since the copy is just a shifted copy of part
 * of the original array, we simply need to subtract the lower bound,
 * which was computed in can_tile_for_shared_memory.
 * If any of the indices is strided, then we first add
 * private_bound[i].shift and divide by private_bound[i].stride.
 */
static void print_private_local_index(isl_ctx *ctx, FILE *out,
	struct cuda_array_ref_group *group,
	__isl_keep isl_qpolynomial **qps, __isl_keep isl_set *domain)
{
	int i;
	isl_printer *prn;
	struct cuda_array_info *array = group->array;
	struct cuda_array_bound *bounds = group->private_bound;

	print_array_name(out, group);
	for (i = 0; i < array->n_index; ++i) {
		isl_qpolynomial *qp = isl_qpolynomial_copy(qps[i]);

		qp = shift_index(qp, array, &bounds[i], isl_set_copy(domain));

		fprintf(out, "[");
		prn = isl_printer_to_file(ctx, out);
		prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
		prn = isl_printer_print_qpolynomial(prn, qp);
		isl_printer_free(prn);
		fprintf(out, "]");
		isl_qpolynomial_free(qp);
	}
}

/* This function is called for each leaf in the clast of the code
 * for copying to or from private memory.
 * The statement name is read_private_<array> or write_private_<array>.
 *
 * The schedule iterates over the array elements, so we can use
 * the domain of private_sched at the current scheduling position
 * as the index of the array.
 */
static void print_private_copy_statement(struct gpucode_info *code,
	struct clast_user_stmt *u)
{
	struct cuda_gen *gen = code->user;
	isl_set *domain;
	isl_map *sched;
	struct cuda_array_ref_group *group = gen->private_group;
	int i;
	unsigned n_in;
	unsigned n_out;
	isl_dim *dim;
	isl_set *param;
	isl_set *index;
	isl_basic_set *aff;
	isl_ctx *ctx;
	isl_qpolynomial **qp;
	int read;

	read = !strncmp(u->statement->name, "read", 4);

	domain = extract_host_domain(u);
	assert(domain);

	sched = isl_map_copy(gen->private_sched);
	sched = isl_map_reverse(sched);
	sched = isl_map_intersect_domain(sched, domain);
	n_in = isl_map_dim(sched, isl_dim_in);
	n_out = isl_map_dim(sched, isl_dim_out);
	dim = isl_map_get_dim(sched);
	dim = isl_dim_drop(dim, isl_dim_in, 0, n_in);
	dim = isl_dim_drop(dim, isl_dim_out, 0, n_out);
	param = parametrization(dim, n_in, 0, n_in, "c");
	sched = isl_map_align_params(sched, isl_set_get_dim(param));
	sched = isl_map_intersect_domain(sched, param);
	index = isl_map_range(sched);
	domain = isl_set_copy(index);
	aff = isl_set_affine_hull(index);
	domain = isl_set_project_out(domain, isl_dim_set, 0, n_out);

	ctx = isl_basic_set_get_ctx(aff);
	qp = isl_alloc_array(ctx, isl_qpolynomial *, n_out);
	assert(qp);

	for (i = 0; i < n_out; ++i) {
		isl_constraint *c;
		int ok;

		ok = isl_basic_set_has_defining_equality(aff,
							isl_dim_set, i, &c);
		assert(ok);
		qp[i] = isl_qpolynomial_from_constraint(c, isl_dim_set, i);
		qp[i] = isl_qpolynomial_drop_dims(qp[i], isl_dim_set, 0, n_out);
	}

	print_indent(code->dst, code->indent);
	if (read) {
		print_private_local_index(ctx, code->dst, group, qp, domain);
		fprintf(code->dst, " = ");
		print_private_global_index(ctx, code->dst, group->array, qp);
	} else {
		print_private_global_index(ctx, code->dst, group->array, qp);
		fprintf(code->dst, " = ");
		print_private_local_index(ctx, code->dst, group, qp, domain);
	}
	fprintf(code->dst, ";\n");

	for (i = 0; i < n_out; ++i)
		isl_qpolynomial_free(qp[i]);
	free(qp);

	isl_basic_set_free(aff);
	isl_set_free(domain);
}

static void print_private_access(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_take isl_set *access,
	const char *type, struct cuda_array_ref_group *group)
{
	const char *array_name;
	char *name;
	isl_ctx *ctx;
	unsigned nvar = isl_set_dim(access, isl_dim_set);
	isl_union_map *usched;

	if (isl_set_fast_is_empty(access)) {
		isl_set_free(access);
		return;
	}

	ctx = isl_set_get_ctx(access);
	array_name = isl_set_get_tuple_name(access);
	name = isl_alloc_array(ctx, char,
		strlen(type) + sizeof("_private_") + strlen(array_name) + 20);
	if (group->array->n_group > 1)
		sprintf(name, "%s_private_%s_%d", type, array_name, group->nr);
	else
		sprintf(name, "%s_private_%s", type, array_name);
	access = isl_set_set_tuple_name(access, name);
	free(name);

	gen->private_sched = shift_access(access, group);
	gen->private_group = group;

	usched = isl_union_map_from_map(isl_map_copy(gen->private_sched));
	print_shared_body(gen, shared_domain, usched, nvar,
			  &print_private_copy_statement, 1);
	isl_union_map_free(usched);

	isl_map_free(gen->private_sched);
}

/* Print code for reading into or writing from private memory
 * the given array reference group.
 *
 * sched maps the original iteration domains to the shared memory tile loops.
 */
static void print_group_private_accesses(struct cuda_gen *gen,
	struct cuda_array_ref_group *group,
	const char *type, __isl_keep isl_set *shared_domain,
	unsigned first_shared, int shared_len, __isl_keep isl_union_map *sched)
{
	int read;
	isl_union_map *access;
	isl_union_set *uset;
	isl_set *access_set;

	if (!group->private_bound)
		return;

	read = !strcmp(type, "read");

	access = group_access_relation(group, read, !read);
	access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
	access = isl_union_map_intersect(access,
				isl_union_map_copy(gen->private_access));
	uset = isl_union_map_range(access);

	if (isl_union_set_is_empty(uset)) {
		isl_union_set_free(uset);
		return;
	}

	access_set = isl_union_set_copy_set(uset);
	isl_union_set_free(uset);
	access_set = isl_set_coalesce(access_set);
	access_set = isl_set_eliminate(access_set, isl_dim_param,
			first_shared + shared_len,
			gen->shared_len - shared_len);

	print_private_access(gen, shared_domain, access_set, type, group);
}

/* Print code for reading into or writing from private memory at
 * the given level (-1 for innermost).
 *
 * If we are not printing at the innermost level, then the dimensionality
 * of shared_domain may be smaller than gen->shared_len.
 * As the rest of the code assumes that the domain of access has
 * gen->shared_len dimensions, we therefore may need to embed this domain
 * in a higher dimensional space after intersection with shared_domain.
 *
 * This code is very similar to print_shared_accesses.
 * The main difference is that we have to take into account
 * gen->private_access.
 */
static void print_private_accesses(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
	const char *type, int level)
{
	int i, j;
	isl_dim *dim;
	isl_map *proj;
	int shared_len = isl_set_dim(shared_domain, isl_dim_set);
	unsigned first_shared;
	isl_union_map *sched;

	shared_domain = isl_set_copy(shared_domain);
	sched = isl_union_map_copy(gen->tiled_sched);
	dim = isl_union_map_get_dim(sched);
	first_shared = isl_dim_size(dim, isl_dim_param);
	proj = projection(dim, gen->tiled_len, shared_len);
	sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
	sched = isl_union_map_intersect_range(sched,
		isl_union_set_from_set(isl_set_copy(shared_domain)));
	if (shared_len != gen->shared_len) {
		dim = isl_union_map_get_dim(sched);
		proj = projection(dim, gen->shared_len, shared_len);
		proj = isl_map_reverse(proj);
		shared_domain = isl_set_apply(shared_domain,
						isl_map_copy(proj));
		sched = isl_union_map_apply_range(sched,
						isl_union_map_from_map(proj));
	}

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		if (gen->array[i].print_shared_level != level)
			continue;

		for (j = 0; j < array->n_group; ++j)
			print_group_private_accesses(gen, array->groups[j],
					type, shared_domain,
					first_shared, shared_len, sched);
	}

	isl_union_map_free(sched);
	isl_set_free(shared_domain);
}

1917 /* Set unroll[j] if the input dimension j is involved in
1918 * the index expression represented by bmap.
1920 static int check_unroll(__isl_take isl_basic_map *bmap, void *user)
1922 int i, j;
1923 int n_in = isl_basic_map_dim(bmap, isl_dim_in);
1924 int n_out = isl_basic_map_dim(bmap, isl_dim_out);
1925 int *unroll = user;
1927 for (i = 0; i < n_out; ++i) {
1928 isl_constraint *c;
1929 int ok;
1931 ok = isl_basic_map_has_defining_equality(bmap,
1932 isl_dim_out, i, &c);
1933 assert(ok);
1934 for (j = 0; j < n_in; ++j)
1935 if (isl_constraint_involves_dims(c, isl_dim_in, j, 1))
1936 unroll[j] = 1;
1937 isl_constraint_free(c);
1940 isl_basic_map_free(bmap);
1941 return 0;
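/* For illustration, a hypothetical input: given a basic map
 *
 *	{ [i0, i1] -> A[o0] : o0 = 2 i1 }
 *
 * the defining equality of output dimension o0 involves input
 * dimension i1, so check_unroll sets unroll[1] and leaves unroll[0]
 * untouched.
 */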
1944 /* Given an array pos mapping input dimensions to the corresponding
1945 * output dimension, construct the corresponding map.
1947 static __isl_give isl_map *permutation(__isl_take isl_dim *dim,
1948 int *pos, int len)
1950 int i;
1951 isl_constraint *c;
1952 isl_basic_map *bmap;
1954 dim = isl_dim_add(dim, isl_dim_in, len);
1955 dim = isl_dim_add(dim, isl_dim_out, len);
1956 bmap = isl_basic_map_universe(isl_dim_copy(dim));
1958 for (i = 0; i < len; ++i) {
1959 c = isl_equality_alloc(isl_dim_copy(dim));
1960 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
1961 isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i], 1);
1962 bmap = isl_basic_map_add_constraint(bmap, c);
1964 isl_dim_free(dim);
1966 return isl_map_from_basic_map(bmap);
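/* For illustration, a hypothetical example: with len = 3 and
 * pos = { 1, 2, 0 }, input dimension k is mapped to output dimension
 * pos[k], yielding
 *
 *	{ [i0, i1, i2] -> [o0, o1, o2] : o1 = i0 and o2 = i1 and o0 = i2 }
 *
 * which is simply { [i0, i1, i2] -> [i2, i0, i1] }.
 */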
1969 /* Find all loops involved in any of the index expressions for any of
1970 * the private accesses, move them innermost and then mark them as
1971 * requiring unrolling by setting gen->first_unroll.
1972 * The loops involved should all be parallel because of the checks
1973 * we performed in check_private_group_access. Moving them innermost
1974 * is therefore a valid transformation.
1976 static __isl_give isl_union_map *interchange_for_unroll(struct cuda_gen *gen,
1977 __isl_take isl_union_map *sched)
1979 int i, j;
1980 int unroll[gen->thread_tiled_len];
1981 int perm[gen->thread_tiled_len];
1982 isl_dim *dim;
1983 isl_map *permute;
1984 int len = gen->shared_len + gen->n_parallel + gen->n_block;
1986 gen->first_unroll = -1;
1988 for (i = 0; i < gen->thread_tiled_len; ++i)
1989 unroll[i] = 0;
1990 for (i = 0; i < gen->n_array; ++i) {
1991 struct cuda_array_info *array = &gen->array[i];
1993 for (j = 0; j < array->n_group; ++j) {
1994 isl_union_map *access;
1995 isl_dim *dim;
1996 isl_map *acc;
1998 if (!array->groups[j]->private_bound)
1999 continue;
2001 access = group_access_relation(array->groups[j], 1, 1);
2002 access = isl_union_map_apply_domain(access,
2003 isl_union_map_copy(sched));
2005 dim = isl_union_map_get_dim(access);
2006 dim = isl_dim_add(dim, isl_dim_out, array->n_index);
2007 dim = isl_dim_set_tuple_name(dim, isl_dim_out,
2008 array->name);
2009 dim = isl_dim_add(dim, isl_dim_in,
2010 gen->thread_tiled_len);
2011 acc = isl_union_map_extract_map(access, dim);
2013 isl_map_foreach_basic_map(acc, &check_unroll, unroll);
2015 isl_map_free(acc);
2016 isl_union_map_free(access);
2020 for (i = 0; i < gen->shared_len; ++i)
2021 if (unroll[i])
2022 return sched;
2024 for (i = gen->shared_len; i < len; ++i)
2025 if (unroll[i])
2026 break;
2028 if (i >= len)
2029 return sched;
2031 for (i = len; i < gen->thread_tiled_len; ++i)
2032 if (unroll[i])
2033 return sched;
2035 j = 0;
2036 for (i = 0; i < gen->thread_tiled_len; ++i)
2037 if (!unroll[i])
2038 perm[i] = j++;
2039 gen->first_unroll = 1 + j;
2040 for (i = 0; i < len; ++i)
2041 if (unroll[i])
2042 perm[i] = j++;
2044 dim = isl_union_map_get_dim(sched);
2045 permute = permutation(dim, perm, gen->thread_tiled_len);
2046 sched = isl_union_map_apply_range(sched,
2047 isl_union_map_from_map(permute));
2049 return sched;
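/* For illustration, a hypothetical run: assume gen->shared_len = 2,
 * len = 3 and gen->thread_tiled_len = 4, with only dimension 2 in need
 * of unrolling, i.e., unroll = { 0, 0, 1, 0 }. The first loop assigns
 * positions 0, 1 and 2 to the dimensions that are not unrolled, the
 * second loop assigns position 3 to dimension 2, so perm = { 0, 1, 3, 2 }
 * and the schedule is composed with { [a, b, c, d] -> [a, b, d, c] },
 * moving the unrolled dimension innermost. gen->first_unroll is set
 * to 1 + 3 = 4, the 1-based position of the first unrolled loop.
 */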
2052 /* This function is called for each leaf in the clast of the kernel code.
2053 * We first specialize the schedule to the site of the leaf and
2054 * print code for reading into shared memory, performing the actual
2055 * computations and writing from shared memory, with the required
2056 * synchronizations.
2058 static void print_kernel_user(struct gpucode_info *code,
2059 struct clast_user_stmt *u)
2061 struct cuda_gen *gen = code->user;
2062 isl_set *shared_domain;
2064 shared_domain = extract_entire_host_domain(u);
2066 print_shared_accesses(gen, shared_domain, gen->read, "read", -1);
2068 print_private_accesses(gen, shared_domain, gen->read, "read", -1);
2070 print_shared_body(gen, shared_domain, gen->local_sched,
2071 gen->thread_tiled_len, &print_statement,
2072 gen->first_unroll);
2074 print_private_accesses(gen, shared_domain, gen->write, "write", -1);
2076 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
2077 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
2079 print_shared_accesses(gen, shared_domain, gen->write, "write", -1);
2081 isl_set_free(shared_domain);
2084 /* Check if we need to perform any copying to shared memory at this level
2085 * and if so, print the copying instructions.
2086 * Copying instructions are printed for any array for which we are
2087 * allowed to print them at this level but have not done so already.
2089 static void print_kernel_for_head(struct gpucode_info *code,
2090 struct clast_for *f)
2092 int i;
2093 struct cuda_gen *gen = code->user;
2094 isl_set *domain;
2095 int level;
2096 int print = 0;
2098 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2099 level = isl_set_dim(domain, isl_dim_set) - 1;
2101 for (i = 0; i < gen->n_array; ++i) {
2102 if (gen->array[i].print_shared_level >= 0)
2103 continue;
2104 if (gen->array[i].last_shared > level)
2105 continue;
2106 gen->array[i].print_shared_level = level;
2107 print = 1;
2110 if (print) {
2111 print_shared_accesses(gen, domain, gen->read, "read", level);
2112 print_private_accesses(gen, domain, gen->read, "read", level);
2115 isl_set_free(domain);
2118 /* Print instructions for copying from shared memory for each array
2119 * for which print_kernel_for_head has added copying instructions
2120 * to shared memory.
2122 static void print_kernel_for_foot(struct gpucode_info *code,
2123 struct clast_for *f)
2125 int i;
2126 struct cuda_gen *gen = code->user;
2127 isl_set *domain;
2128 int level;
2129 int print = 0;
2131 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2132 level = isl_set_dim(domain, isl_dim_set) - 1;
2134 for (i = 0; i < gen->n_array; ++i) {
2135 if (gen->array[i].print_shared_level != level)
2136 continue;
2137 print = 1;
2138 break;
2141 if (print) {
2142 print_private_accesses(gen, domain, gen->write, "write", level);
2143 print_shared_accesses(gen, domain, gen->write, "write", level);
2146 isl_set_free(domain);
2149 /* Use CLooG to generate code for the outer gen->shared_first loops
2150 * of the local schedule "sched".
2151 * The pretty printing of this code is handled by gpu_print_host_stmt,
2152 * which calls print_kernel_user for each iteration of the shared tile loops.
2154 static void print_cloog_kernel_body(struct cuda_gen *gen,
2155 __isl_keep isl_set *context, __isl_keep isl_union_map *sched)
2157 int i;
2158 CloogOptions *options;
2159 CloogDomain *cloog_context;
2160 CloogUnionDomain *ud;
2161 CloogInput *input;
2162 struct clast_stmt *stmt;
2163 char name[20];
2165 sched = isl_union_map_copy(sched);
2166 sched = isl_union_map_align_params(sched, isl_set_get_dim(context));
2168 options = cloog_options_malloc(gen->state);
2169 options->language = LANGUAGE_C;
2170 options->strides = 1;
2171 options->sh = 1;
2172 options->stop = gen->shared_len;
2173 options->f = gen->tiled_len;
2174 options->l = gen->tiled_len;
2175 options->save_domains = 1;
2176 options->noscalars = 1;
2178 ud = cloog_union_domain_from_isl_union_map(sched);
2179 for (i = 0; i < gen->shared_len; ++i) {
2180 snprintf(name, sizeof(name), "g%d", i);
2181 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
2183 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
2184 input = cloog_input_alloc(cloog_context, ud);
2186 stmt = cloog_clast_create_from_input(input, options);
2188 gen->kernel_code.indent = 4;
2189 gen->kernel_code.dst = gen->cuda.kernel_c;
2190 gen->kernel_code.print_user_stmt = NULL;
2191 gen->kernel_code.print_user_stmt_list = &print_kernel_user;
2192 gen->kernel_code.print_for_head = &print_kernel_for_head;
2193 gen->kernel_code.print_for_foot = &print_kernel_for_foot;
2194 gen->kernel_code.user = gen;
2195 gpu_print_host_stmt(&gen->kernel_code, stmt);
2197 cloog_clast_free(stmt);
2198 cloog_options_free(options);
2201 static void print_kernel_iterators(struct cuda_gen *gen)
2203 int i;
2204 const char *block_dims[] = { "blockIdx.x", "blockIdx.y" };
2205 const char *thread_dims[] = { "threadIdx.x", "threadIdx.y",
2206 "threadIdx.z" };
2208 if (gen->n_grid > 0) {
2209 print_indent(gen->cuda.kernel_c, 4);
2210 fprintf(gen->cuda.kernel_c, "int ");
2211 for (i = 0; i < gen->n_grid; ++i) {
2212 if (i)
2213 fprintf(gen->cuda.kernel_c, ", ");
2214 fprintf(gen->cuda.kernel_c, "b%d = %s",
2215 i, block_dims[gen->n_grid - 1 - i]);
2217 fprintf(gen->cuda.kernel_c, ";\n");
2220 if (gen->n_block > 0) {
2221 print_indent(gen->cuda.kernel_c, 4);
2222 fprintf(gen->cuda.kernel_c, "int ");
2223 for (i = 0; i < gen->n_block; ++i) {
2224 if (i)
2225 fprintf(gen->cuda.kernel_c, ", ");
2226 fprintf(gen->cuda.kernel_c, "t%d = %s",
2227 i, thread_dims[gen->n_block - 1 - i]);
2229 fprintf(gen->cuda.kernel_c, ";\n");
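/* For illustration: with gen->n_grid = 2 and gen->n_block = 3, the
 * code printed above would read
 *
 *	int b0 = blockIdx.y, b1 = blockIdx.x;
 *	int t0 = threadIdx.z, t1 = threadIdx.y, t2 = threadIdx.x;
 *
 * i.e., the CUDA identifiers are assigned in reverse order, so that
 * the last of our iterators corresponds to the ".x" dimension.
 */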
2233 static void print_group_shared_array(struct cuda_gen *gen,
2234 struct cuda_array_ref_group *group)
2236 int j;
2237 struct cuda_array_bound *bounds;
2239 bounds = group->private_bound;
2240 if (!bounds)
2241 bounds = group->shared_bound;
2242 if (!bounds)
2243 return;
2245 print_indent(gen->cuda.kernel_c, 4);
2246 fprintf(gen->cuda.kernel_c, "%s%s ",
2247 group->private_bound ? "" : "__shared__ ", gen->options->type);
2248 print_array_name(gen->cuda.kernel_c, group);
2249 for (j = 0; j < group->array->n_index; ++j) {
2250 fprintf(gen->cuda.kernel_c, "[");
2251 isl_int_print(gen->cuda.kernel_c, bounds[j].size, 0);
2252 fprintf(gen->cuda.kernel_c, "]");
2254 fprintf(gen->cuda.kernel_c, ";\n");
2257 static void print_shared_arrays(struct cuda_gen *gen)
2259 int i, j;
2261 for (i = 0; i < gen->n_array; ++i) {
2262 struct cuda_array_info *array = &gen->array[i];
2264 for (j = 0; j < array->n_group; ++j)
2265 print_group_shared_array(gen, array->groups[j]);
2269 static void print_kernel_body(struct cuda_gen *gen,
2270 __isl_keep isl_set *host_domain, __isl_keep isl_union_map *sched)
2272 isl_set *context;
2274 context = isl_set_copy(host_domain);
2275 context = parametrize(context, 0, gen->tile_first, "h");
2276 context = isl_set_project_out(context, isl_dim_set, 0, gen->tile_first);
2277 context = add_bounded_parameters(context,
2278 gen->n_grid, gen->grid_dim, "b");
2280 print_kernel_iterators(gen);
2281 print_shared_arrays(gen);
2283 fprintf(gen->cuda.kernel_c, "\n");
2285 print_cloog_kernel_body(gen, context, sched);
2287 isl_set_free(context);
2290 /* Given a constraint
2292 * a(p,i) + j = g f(e)
2294 * or -a(p,i) - j = g f(e) if sign < 0,
2295 * store a(p,i) in bound->shift and g (stride) in bound->stride.
2296 * a(p,i) is assumed to be an expression in only the parameters.
2298 static void extract_stride(__isl_keep isl_constraint *c,
2299 struct cuda_array_bound *bound, isl_int stride, int sign)
2301 int i;
2302 isl_int v;
2303 isl_int one;
2304 isl_dim *dim;
2305 unsigned nparam;
2306 isl_qpolynomial *qp;
2308 isl_int_set(bound->stride, stride);
2310 dim = isl_constraint_get_dim(c);
2311 dim = isl_dim_drop(dim, isl_dim_out, 0, 1);
2312 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
2313 dim = isl_dim_domain(dim);
2315 nparam = isl_dim_size(dim, isl_dim_param);
2317 isl_int_init(v);
2318 isl_int_init(one);
2319 isl_int_set_si(one, 1);
2321 isl_constraint_get_constant(c, &v);
2322 if (sign < 0)
2323 isl_int_neg(v, v);
2324 qp = isl_qpolynomial_rat_cst(isl_dim_copy(dim), v, one);
2326 for (i = 0; i < nparam; ++i) {
2327 isl_qpolynomial *t, *p;
2329 isl_constraint_get_coefficient(c, isl_dim_param, i, &v);
2330 if (isl_int_is_zero(v))
2331 continue;
2332 if (sign < 0)
2333 isl_int_neg(v, v);
2334 t = isl_qpolynomial_rat_cst(isl_dim_copy(dim), v, one);
2335 p = isl_qpolynomial_var(isl_dim_copy(dim), isl_dim_param, i);
2336 t = isl_qpolynomial_mul(t, p);
2337 qp = isl_qpolynomial_add(qp, t);
2340 isl_dim_free(dim);
2341 isl_int_clear(one);
2342 isl_int_clear(v);
2344 bound->shift = qp;
2347 /* Given an equality constraint of a map with a single output dimension j,
2348 * check if the constraint is of the form
2350 * a(p,i) + j = g f(e)
2352 * with a(p,i) an expression in the parameters and input dimensions
2353 * and f(e) an expression in the existentially quantified variables.
2354 * If so, and if g is larger than any such g from a previously considered
2355 * constraint, then call extract_stride to record the stride information
2356 * in bound.
2358 static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
2360 int i;
2361 isl_int v, stride;
2362 unsigned n_div;
2363 struct cuda_array_bound *bound = user;
2365 isl_int_init(v);
2366 isl_int_init(stride);
2368 n_div = isl_constraint_dim(c, isl_dim_div);
2369 isl_constraint_get_coefficient(c, isl_dim_out, 0, &v);
2371 if (n_div && (isl_int_is_one(v) || isl_int_is_negone(v))) {
2372 int s = isl_int_sgn(v);
2373 isl_int_set_si(stride, 0);
2374 for (i = 0; i < n_div; ++i) {
2375 isl_constraint_get_coefficient(c, isl_dim_div, i, &v);
2376 isl_int_gcd(stride, stride, v);
2378 if (!isl_int_is_zero(stride) &&
2379 isl_int_gt(stride, bound->stride))
2380 extract_stride(c, bound, stride, s);
2383 isl_int_clear(stride);
2384 isl_int_clear(v);
2386 isl_constraint_free(c);
2387 return 0;
2390 /* Given constraints on an array index i, check if we can find
2391 * a shift a(p) and a stride g such that
2393 * a(p) + i = 0 mod g
2395 * If so, record the information in bound and apply the mapping
2396 * i -> (i + a(p))/g to the array index in bounds and return
2397 * the new constraints.
2398 * If not, simply return the original constraints.
2400 static __isl_give isl_basic_map *check_stride(struct cuda_gen *gen,
2401 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2403 isl_dim *dim;
2404 isl_basic_map *aff;
2405 isl_basic_map *shift;
2406 isl_qpolynomial *qp, *t;
2407 isl_int one;
2409 isl_int_set_si(bound->stride, -1);
2411 aff = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));
2413 isl_basic_map_foreach_constraint(aff, &check_stride_constraint, bound);
2415 isl_basic_map_free(aff);
2417 if (isl_int_is_neg(bound->stride))
2418 return bounds;
2420 qp = isl_qpolynomial_copy(bound->shift);
2421 qp = isl_qpolynomial_add_dims(qp, isl_dim_set, 1);
2422 dim = isl_qpolynomial_get_dim(qp);
2423 t = isl_qpolynomial_var(isl_dim_copy(dim), isl_dim_set, 0);
2424 qp = isl_qpolynomial_add(qp, t);
2425 isl_int_init(one);
2426 isl_int_set_si(one, 1);
2427 t = isl_qpolynomial_rat_cst(dim, one, bound->stride);
2428 isl_int_clear(one);
2429 qp = isl_qpolynomial_mul(qp, t);
2430 shift = isl_basic_map_from_qpolynomial(qp);
2432 bound->shift_map = isl_basic_map_copy(shift);
2433 bounds = isl_basic_map_apply_range(bounds, shift);
2435 return bounds;
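/* For illustration, a hypothetical example: given index constraints
 *
 *	{ [] -> [o] : exists e : o = 4 e + p }
 *
 * check_stride_constraint detects the equality -p + o = 4 e, so the
 * stride is g = 4 with shift a(p) = -p, and the index is rewritten
 * through o -> (o - p)/4, making the resulting range dense.
 */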
2438 struct cuda_size_info {
2439 isl_basic_set *bset;
2440 struct cuda_array_bound *bound;
2441 int pos;
2444 /* Given a constraint from the basic set describing the bounds on
2445 * an array index, check if it is a lower bound, say m i >= b(x), and,
2446 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
2447 * upper bound. If so, and if this bound is smaller than any bound
2448 * derived from earlier constraints, set the size to this bound on
2449 * the expression and the lower bound to b(x).
2451 static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
2453 struct cuda_size_info *size = user;
2454 unsigned nparam;
2455 unsigned n_div;
2456 isl_int v;
2458 nparam = isl_basic_set_dim(size->bset, isl_dim_param);
2459 n_div = isl_constraint_dim(c, isl_dim_div);
2461 if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div)) {
2462 isl_constraint_free(c);
2463 return 0;
2466 isl_int_init(v);
2468 isl_constraint_get_coefficient(c, isl_dim_set, size->pos, &v);
2470 if (isl_int_is_pos(v)) {
2471 isl_aff *aff;
2472 isl_qpolynomial *lb;
2473 enum isl_lp_result res;
2475 aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
2476 aff = isl_aff_ceil(aff);
2478 lb = isl_qpolynomial_from_aff(isl_aff_copy(aff));
2480 aff = isl_aff_neg(aff);
2481 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, size->pos, 1);
2483 res = isl_basic_set_max(size->bset, aff, &v);
2484 isl_aff_free(aff);
2486 if (res == isl_lp_ok) {
2487 isl_int_add_ui(v, v, 1);
2488 if (isl_int_is_neg(size->bound->size) ||
2489 isl_int_lt(v, size->bound->size)) {
2490 isl_int_set(size->bound->size, v);
2491 isl_qpolynomial_free(size->bound->lb);
2492 size->bound->lb = isl_qpolynomial_copy(lb);
2495 isl_qpolynomial_free(lb);
2498 isl_int_clear(v);
2499 isl_constraint_free(c);
2501 return 0;
2504 /* Given a basic map "bounds" that maps parameters and input dimensions
2505 * to a single output dimension, look for an expression in the parameters
2506 * and input dimensions such that the range of the output dimension shifted
2507 * by this expression is a constant.
2509 * In particular, we currently only consider lower bounds on the output
2510 * dimension as candidate expressions.
2512 static int compute_array_dim_size(struct cuda_gen *gen,
2513 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2515 struct cuda_size_info size;
2517 bounds = check_stride(gen, bound, bounds);
2519 isl_int_set_si(bound->size, -1);
2520 bound->lb = NULL;
2522 size.bound = bound;
2523 size.pos = isl_basic_map_dim(bounds, isl_dim_in);
2524 size.bset = isl_basic_map_wrap(bounds);
2525 size.bset = isl_basic_set_flatten(size.bset);
2526 isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
2527 &size);
2528 isl_basic_set_free(size.bset);
2530 return isl_int_is_nonneg(bound->size) ? 0 : -1;
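/* For illustration, a hypothetical example: for the index relation
 *
 *	{ [i] -> [o] : i <= o <= i + 31 }
 *
 * the lower bound o >= i is considered and o - i + 1 is found to have
 * constant upper bound 32, so bound->size is set to 32 and bound->lb
 * to the lower bound expression i.
 */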
2533 /* Check if we can find a shared memory tile for the given array
2534 * based on the given accesses, and if so, put the results
2535 * in array->shared_bound.
2537 * We project the accesses on each index in turn and look for a parametric
2538 * offset such that the size is constant.
2540 static int can_tile_for_shared_memory(struct cuda_gen *gen,
2541 struct cuda_array_info *array, __isl_keep isl_map *access,
2542 struct cuda_array_bound *bounds)
2544 int i;
2546 for (i = 0; i < array->n_index; ++i) {
2547 isl_map *access_i;
2548 isl_basic_map *hull;
2550 access_i = isl_map_copy(access);
2551 access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
2552 access_i = isl_map_project_out(access_i, isl_dim_out,
2553 i + 1, array->n_index - (i + 1));
2554 access_i = isl_map_compute_divs(access_i);
2555 hull = isl_map_simple_hull(access_i);
2556 if (compute_array_dim_size(gen, &bounds[i], hull) < 0)
2557 return 0;
2560 return 1;
2563 /* Construct a map that takes as input the shared tile loops and the loops
2564 * that will be wrapped around the threads, relates these latter loops
2565 * to the thread indices and then projects them out.
2567 static __isl_give isl_map *compute_privatization(struct cuda_gen *gen)
2569 isl_map *priv;
2570 isl_map *tiling;
2571 isl_map *proj;
2572 isl_set *par;
2573 isl_dim *dim;
2575 dim = isl_union_map_get_dim(gen->shared_sched);
2577 if (gen->options->wrap)
2578 tiling = wrap(isl_dim_copy(dim), gen->shared_len + gen->n_block,
2579 gen->shared_len, gen->n_block, gen->block_dim);
2580 else
2581 tiling = tile(isl_dim_copy(dim), gen->shared_len + gen->n_block,
2582 gen->shared_len, gen->n_block, gen->block_dim);
2584 priv = tiling;
2586 par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
2587 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
2588 gen->n_block, "t");
2590 priv = isl_map_align_params(priv, isl_set_get_dim(par));
2591 priv = isl_map_intersect_range(priv, par);
2593 dim = isl_map_get_dim(priv);
2594 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
2595 dim = isl_dim_drop(dim, isl_dim_out, 0, isl_dim_size(dim, isl_dim_out));
2596 proj = projection(dim, gen->shared_len + 2 * gen->n_block,
2597 gen->shared_len);
2599 priv = isl_map_apply_range(priv, proj);
2601 return priv;
2604 /* Construct a map from domain_dim to domain_dim that increments
2605 * the dimension at position "pos" and leaves all other dimensions
2606 * constant.
2608 static __isl_give isl_map *next(__isl_take isl_dim *domain_dim, int pos)
2610 int i;
2611 int len = isl_dim_size(domain_dim, isl_dim_set);
2612 isl_dim *dim;
2613 isl_basic_map *next;
2615 dim = isl_dim_map_from_set(domain_dim);
2616 next = isl_basic_map_universe(isl_dim_copy(dim));
2618 for (i = 0; i < len; ++i) {
2619 isl_constraint *c;
2621 c = isl_equality_alloc(isl_dim_copy(dim));
2622 isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
2623 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
2624 if (i == pos)
2625 isl_constraint_set_constant_si(c, 1);
2626 next = isl_basic_map_add_constraint(next, c);
2629 isl_dim_free(dim);
2631 return isl_map_from_basic_map(next);
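/* For illustration: on a three-dimensional space, next(dim, 1)
 * constructs the map
 *
 *	{ [i0, i1, i2] -> [i0, 1 + i1, i2] }
 */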
2634 /* Check if the given access is coalesced.
2635 * That is, check whether incrementing the dimension that will get
2636 * wrapped over the last thread index results in incrementing
2637 * the last array index.
2639 * This function is only called for access relations without reuse.
2641 static int access_is_coalesced(struct cuda_gen *gen,
2642 __isl_keep isl_union_map *access)
2644 isl_dim *dim;
2645 isl_map *access_map;
2646 isl_map *next_thread_x;
2647 isl_map *next_element;
2648 isl_map *map;
2649 int coalesced;
2651 access = isl_union_map_copy(access);
2652 access = isl_union_map_apply_domain(access,
2653 isl_union_map_copy(gen->tiled_sched));
2654 access_map = isl_union_map_copy_map(access);
2655 isl_union_map_free(access);
2657 dim = isl_map_get_dim(access_map);
2658 dim = isl_dim_domain(dim);
2659 next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);
2661 dim = isl_map_get_dim(access_map);
2662 dim = isl_dim_range(dim);
2663 next_element = next(dim, isl_dim_size(dim, isl_dim_set) - 1);
2665 map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
2666 map = isl_map_apply_range(map, access_map);
2668 coalesced = isl_map_is_subset(map, next_element);
2670 isl_map_free(next_element);
2671 isl_map_free(map);
2673 return coalesced;
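/* For illustration, a hypothetical example: if x is the iterator that
 * will be wrapped over threadIdx.x, then an access mapping x to A[x]
 * is coalesced, since incrementing x increments the last array index.
 * An access mapping x to A[32 * x], on the other hand, jumps by 32
 * elements whenever x is incremented and is therefore not coalesced.
 */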
2676 /* For the given array reference group, check whether the access is private
2677 * to the thread. That is, check that any given array element
2678 * is only accessed by a single thread.
2679 * We compute an access relation that maps the shared tile loop iterators
2680 * and the shared point loop iterators that will be wrapped over the
2681 * threads to the array elements.
2682 * We actually check that those iterators that will be wrapped
2683 * partition the array space. This check is stricter than necessary
2684 * since several iterations may be mapped onto the same thread
2685 * and then they could be allowed to access the same memory elements,
2686 * but our check does not allow this situation.
2688 * We also check that the index expression only depends on parallel
2689 * loops. That way, we can move those loops innermost and unroll them.
2690 * Again, we use a test that is stricter than necessary.
2691 * We actually check whether the index expression only depends
2692 * on the iterators that are wrapped over the threads.
2693 * These are necessarily parallel, but there may be more parallel loops.
2695 * Combining the injectivity of the first test with the single-valuedness
2696 * of the second test, we simply test for bijectivity.
2698 * If it turns out we can use registers, we compute the private memory
2699 * tile size using can_tile_for_shared_memory, after introducing a dependence
2700 * on the thread indices.
2702 * Before performing any of the above computations, we first check
2703 * if there is any reuse on the reference group. If not, we simply
2704 * return. If, moreover, the access is coalesced then we also remove
2705 * the shared memory tiling since we should just use global memory instead.
2707 static void check_private_group_access(struct cuda_gen *gen,
2708 struct cuda_array_ref_group *group)
2710 isl_map *acc;
2711 isl_union_map *access;
2712 int n_index = group->array->n_index;
2714 access = group_access_relation(group, 1, 1);
2715 if (isl_union_map_is_injective(access)) {
2716 if (group->shared_bound && access_is_coalesced(gen, access)) {
2717 free_bound_list(group->shared_bound, n_index);
2718 group->shared_bound = NULL;
2720 isl_union_map_free(access);
2721 return;
2723 access = isl_union_map_apply_domain(access,
2724 isl_union_map_copy(gen->shared_sched));
2726 acc = isl_union_map_copy_map(access);
2727 isl_union_map_free(access);
2729 if (!isl_map_is_bijective(acc)) {
2730 isl_map_free(acc);
2731 return;
2734 group->private_bound = create_bound_list(gen->ctx, n_index);
2735 acc = isl_map_align_params(acc, isl_map_get_dim(gen->privatization));
2736 acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
2737 if (!can_tile_for_shared_memory(gen, group->array, acc,
2738 group->private_bound)) {
2739 free_bound_list(group->private_bound, n_index);
2740 group->private_bound = NULL;
2743 isl_map_free(acc);
2746 /* Look for the last shared tile loop that affects the offset of the
2747 * shared or private tile and store the result in array->last_shared.
2749 static void set_last_shared(struct cuda_gen *gen,
2750 struct cuda_array_ref_group *group)
2752 int i, j;
2753 struct cuda_array_bound *bounds;
2754 unsigned first_shared = gen->first_shared;
2755 int n_index = group->array->n_index;
2757 bounds = group->private_bound;
2758 if (!bounds)
2759 bounds = group->shared_bound;
2760 if (!bounds)
2761 return;
2763 for (j = gen->shared_len - 1; j >= 0; --j) {
2764 for (i = 0; i < n_index; ++i) {
2765 isl_qpolynomial *lb, *shift;
2767 lb = bounds[i].lb;
2768 if (isl_qpolynomial_involves_dims(lb, isl_dim_param,
2769 first_shared + j, 1))
2770 break;
2772 shift = bounds[i].shift;
2773 if (!shift)
2774 continue;
2775 if (isl_qpolynomial_involves_dims(shift, isl_dim_param,
2776 first_shared + j, 1))
2777 break;
2779 if (i < n_index)
2780 break;
2782 group->array->last_shared = j;
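/* For illustration, a hypothetical example: with gen->shared_len = 3,
 * if some lower bound depends on the parameter encoding shared tile
 * loop 1, while no bound or shift depends on tile loop 2, then the
 * scan stops at j = 1 and last_shared is set to 1.
 */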
2785 /* Compute the sizes of all private arrays for the current kernel,
2786 * as well as the offsets of the private pieces in the original arrays.
2787 * If we cannot or don't want to privatize a given array group,
2788 * we use the shared memory tile sizes computed in
2789 * compute_group_shared_bound instead.
2791 * If a given array only has a single reference group and if we have
2792 * been able to find a private or shared tile,
2793 * we also look for the last shared tile loop that affects the offset
2794 * (and therefore the array tile) and store the result in array->last_shared.
2796 * A privatized copy of all access relations from reference groups that
2797 * are mapped to private memory is stored in gen->private_access.
2799 static void compute_private_size(struct cuda_gen *gen)
2801 int i, j;
2802 isl_union_map *private;
2804 private = isl_union_map_empty(isl_union_map_get_dim(gen->shared_sched));
2806 for (i = 0; i < gen->n_array; ++i) {
2807 struct cuda_array_info *array = &gen->array[i];
2809 for (j = 0; j < array->n_group; ++j) {
2810 check_private_group_access(gen, array->groups[j]);
2812 if (!array->groups[j]->private_bound)
2813 continue;
2815 private = isl_union_map_union(private,
2816 group_access_relation(array->groups[j], 1, 1));
2819 array->last_shared = gen->shared_len - 1;
2820 array->print_shared_level = -1;
2822 if (array->n_group != 1)
2823 continue;
2824 set_last_shared(gen, array->groups[0]);
2827 if (isl_union_map_is_empty(private))
2828 isl_union_map_free(private);
2829 else {
2830 isl_union_map *priv;
2832 private = isl_union_map_apply_domain(private,
2833 isl_union_map_copy(gen->shared_sched));
2834 priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
2835 private = isl_union_map_apply_domain(private, priv);
2836 gen->private_access = private;
2840 /* Fill up the groups array with singleton groups, i.e., one group
2841 * per reference, initializing the array, access, write and refs fields.
2842 * In particular the access field is initialized to the scheduled
2843 * access relation of the array reference.
2845 * Return the number of elements initialized, i.e., the number of
2846 * active references in the current kernel.
2848 static int populate_array_references(struct cuda_gen *gen,
2849 struct cuda_array_info *array, __isl_keep isl_union_map *sched,
2850 struct cuda_array_ref_group **groups)
2852 int i;
2853 int n;
2854 isl_ctx *ctx = isl_union_map_get_ctx(sched);
2856 n = 0;
2857 for (i = 0; i < array->n_ref; ++i) {
2858 isl_union_map *umap;
2859 isl_map *map;
2860 struct cuda_array_ref_group *group;
2861 struct cuda_stmt_access *access = array->refs[i];
2863 map = isl_map_copy(access->access);
2864 umap = isl_union_map_from_map(map);
2865 umap = isl_union_map_apply_domain(umap,
2866 isl_union_map_copy(sched));
2868 map = isl_union_map_copy_map(umap);
2869 isl_union_map_free(umap);
2871 if (isl_map_is_empty(map)) {
2872 isl_map_free(map);
2873 continue;
2876 group = isl_calloc_type(ctx, struct cuda_array_ref_group);
2877 assert(group);
2878 group->array = array;
2879 group->access = map;
2880 group->write = access->write;
2881 group->refs = &array->refs[i];
2883 groups[n++] = group;
2886 return n;
2889 static void free_array_ref_group(struct cuda_array_ref_group *group,
2890 int n_index)
2892 if (!group)
2893 return;
2894 free_bound_list(group->shared_bound, n_index);
2895 free_bound_list(group->private_bound, n_index);
2896 isl_map_free(group->access);
2897 free(group->refs);
2898 free(group);
2901 /* If two groups have overlapping access relations and if one of them
2902 * involves a write, then merge the two groups into one.
2904 * We keep track of the grouping in "leader". leader[j] points to
2905 * an earlier group array element that belongs to the same group,
2906 * or the array element j itself if this element is the first in the group.
2908 * Return the number of group leaders.
2910 static int group_overlapping_writes(int n,
2911 struct cuda_array_ref_group **groups, int *leader)
2913 int i, j;
2914 int n_group = n;
2916 for (i = 0; i < n; ++i) {
2917 int l = i;
2918 groups[l]->n_ref = 1;
2919 for (j = i - 1; j >= 0; --j) {
2920 isl_map *map;
2921 int empty;
2923 if (leader[j] != j)
2924 continue;
2925 if (!groups[l]->write && !groups[j]->write)
2926 continue;
2928 map = isl_map_intersect(isl_map_copy(groups[l]->access),
2929 isl_map_copy(groups[j]->access));
2930 empty = isl_map_is_empty(map);
2931 isl_map_free(map);
2933 if (empty)
2934 continue;
2936 groups[j]->access = isl_map_union(groups[j]->access,
2937 groups[l]->access);
2938 groups[j]->write = 1;
2939 groups[l]->access = NULL;
2940 groups[j]->n_ref += groups[l]->n_ref;
2941 l = leader[l] = j;
2942 n_group--;
2944 leader[i] = l;
2947 return n_group;
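/* For illustration, a hypothetical example: given three references of
 * which only reference 2, a write, overlaps with reference 0, the scan
 * produces leader = { 0, 1, 0 }, merges the accesses of group 2 into
 * group 0 and returns 2 group leaders.
 */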
2950 /* Compute the size of the shared array corresponding to the given
2951 * array reference group, based on the accesses from the current kernel,
2952 * as well as the offset of the shared piece in the original array.
2954 static void compute_group_shared_bound(struct cuda_gen *gen,
2955 struct cuda_array_info *array, struct cuda_array_ref_group *group)
2957 isl_ctx *ctx = isl_dim_get_ctx(array->dim);
2959 group->shared_bound = create_bound_list(ctx, array->n_index);
2960 if (!can_tile_for_shared_memory(gen, array, group->access,
2961 group->shared_bound)) {
2962 free_bound_list(group->shared_bound, array->n_index);
2963 group->shared_bound = NULL;
2967 /* Given an initial grouping of array references and shared memory tiles
2968 * for each group that allows for a shared memory tile, merge two groups
2969 * if both have a shared memory tile and if the merged group also has
2970 * a shared memory tile.
2972 * Return the number of group leaders after merging.
2974 static int group_common_shared_memory_tile(struct cuda_gen *gen,
2975 struct cuda_array_info *array, int n,
2976 struct cuda_array_ref_group **groups, int *leader, int n_group)
2978 int i, j;
2979 isl_ctx *ctx = isl_dim_get_ctx(array->dim);
2981 for (i = 0; n_group > 1 && i < n; ++i) {
2982 int l = i;
2983 if (leader[i] != i)
2984 continue;
2985 if (!groups[i]->shared_bound)
2986 continue;
2987 for (j = i - 1; j >= 0; --j) {
2988 isl_map *map;
2989 int empty;
2990 struct cuda_array_bound *shared_bound;
2992 if (leader[j] != j)
2993 continue;
2994 if (!groups[j]->shared_bound)
2995 continue;
2997 map = isl_map_intersect(isl_map_copy(groups[l]->access),
2998 isl_map_copy(groups[j]->access));
2999 empty = isl_map_is_empty(map);
3000 isl_map_free(map);
3002 if (empty)
3003 continue;
3005 map = isl_map_union(isl_map_copy(groups[l]->access),
3006 isl_map_copy(groups[j]->access));
3007 shared_bound = create_bound_list(ctx, array->n_index);
3008 if (!can_tile_for_shared_memory(gen, array, map,
3009 shared_bound)) {
3010 isl_map_free(map);
3011 free_bound_list(shared_bound, array->n_index);
3012 continue;
3015 free_bound_list(groups[j]->shared_bound,
3016 array->n_index);
3017 groups[j]->shared_bound = shared_bound;
3018 isl_map_free(groups[j]->access);
3019 groups[j]->access = map;
3020 groups[j]->n_ref += groups[l]->n_ref;
3021 l = leader[l] = j;
3022 n_group--;
3026 return n_group;
3029 /* Extract an array of array reference groups from the array of references
3030 * and the grouping information in "leader".
3032 * Store the results in array->n_group and array->groups.
3034 static void extract_array_groups(isl_ctx *ctx, struct cuda_array_info *array,
3035 int n, struct cuda_array_ref_group **groups, int *leader, int n_group)
3037 int i, j;
3039 for (i = 2; i < n; ++i)
3040 leader[i] = leader[leader[i]];
3042 array->n_group = n_group;
3043 array->groups = isl_alloc_array(ctx, struct cuda_array_ref_group *,
3044 n_group);
3045 assert(array->groups);
3047 j = 0;
3048 for (i = 0; i < n; ++i) {
3049 int k, l;
3050 struct cuda_stmt_access **refs;
3052 if (leader[i] != i) {
3053 groups[i]->refs = NULL;
3054 free_array_ref_group(groups[i], array->n_index);
3055 continue;
3058 refs = isl_alloc_array(ctx, struct cuda_stmt_access *,
3059 groups[i]->n_ref);
3060 assert(refs);
3061 l = 0;
3062 for (k = i; k < n; ++k)
3063 if (leader[k] == i) {
3064 refs[l++] = *groups[k]->refs;
3065 (*groups[k]->refs)->group = j;
3068 groups[i]->refs = refs;
3069 groups[i]->nr = j;
3070 array->groups[j++] = groups[i];
3074 /* Group array references that should be considered together when
3075 * deciding whether to access them from private, shared or global memory.
3077 * In particular, if two array references overlap and if one of them
3078 * is a write, then the two references are grouped together.
3079 * Furthermore, if two groups admit a shared memory tile and if the
3080 * combination of the two also admits a shared memory tile, we merge
3081 * the two groups.
3083 * During the construction the group->refs field points to a single
3084 * array reference inside the array of array references, while
3085 * group->n_ref contains the number of elements in leader that
3086 * (directly or indirectly) point to this group, provided the group
3087 * is a leader.
3089 static void group_array_references(struct cuda_gen *gen,
3090 struct cuda_array_info *array, __isl_keep isl_union_map *sched)
3092 int i;
3093 int n, n_group;
3094 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3095 struct cuda_array_ref_group **groups;
3096 int *leader;
3098 groups = isl_calloc_array(ctx, struct cuda_array_ref_group *,
3099 array->n_ref);
3100 assert(groups);
3102 n = populate_array_references(gen, array, sched, groups);
3104 leader = isl_alloc_array(ctx, int, n);
3105 assert(leader);
3107 n_group = group_overlapping_writes(n, groups, leader);
3109 for (i = 0; i < n; ++i)
3110 if (leader[i] == i)
3111 compute_group_shared_bound(gen, array, groups[i]);
3113 n_group = group_common_shared_memory_tile(gen, array, n, groups,
3114 leader, n_group);
3116 extract_array_groups(ctx, array, n, groups, leader, n_group);
3118 free(leader);
3119 free(groups);
3122 /* Take tiled_sched, project it onto the shared tile loops and
3123 * the loops that will be wrapped over the threads,
3124 * parametrize the shared tile loops and store the result in gen->shared_sched.
3125 * The position of the first of these parameters is stored in gen->first_shared.
3126 * Also compute a projection that projects out the loops that will be
3127 * wrapped over the threads and store this projection in gen->shared_proj.
3129 static void compute_shared_sched(struct cuda_gen *gen)
3131 isl_dim *dim;
3132 isl_map *proj;
3133 isl_set *par;
3134 isl_union_map *sched;
3136 sched = isl_union_map_copy(gen->tiled_sched);
3138 dim = isl_union_map_get_dim(sched);
3139 gen->first_shared = isl_dim_size(dim, isl_dim_param);
3140 proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
3141 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
3143 dim = isl_union_map_get_dim(sched);
3144 par = parametrization(dim, gen->shared_len + gen->n_block,
3145 0, gen->shared_len, "g");
3146 sched = isl_union_map_intersect_range(sched,
3147 isl_union_set_from_set(par));
3149 dim = isl_union_map_get_dim(sched);
3150 proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);
3152 gen->shared_sched = sched;
3153 gen->shared_proj = isl_union_map_from_map(proj);
3156 /* Group references of all arrays in the program.
3158 static void group_references(struct cuda_gen *gen)
3160 int i;
3161 isl_union_map *sched;
3163 sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
3164 isl_union_map_copy(gen->shared_proj));
3166 for (i = 0; i < gen->n_array; ++i)
3167 group_array_references(gen, &gen->array[i], sched);
3169 isl_union_map_free(sched);
3172 /* Free all array information that is local to the current kernel.
3174 static void free_local_array_info(struct cuda_gen *gen)
3176 int i, j;
3178 for (i = 0; i < gen->n_array; ++i) {
3179 struct cuda_array_info *array = &gen->array[i];
3181 for (j = 0; j < array->n_group; ++j)
3182 free_array_ref_group(array->groups[j], array->n_index);
3183 free(array->groups);
3185 if (array->n_group == 0)
3186 continue;
3187 for (j = 0; j < gen->array[i].n_index; ++j) {
3188 isl_pw_qpolynomial_fold_free(gen->array[i].local_bound[j]);
3189 gen->array[i].local_bound[j] = NULL;
3194 static void print_iterator_list(FILE *out, int len, const char *prefix,
3195 int parens)
3197 int i;
3199 fprintf(out, "(");
3200 for (i = 0; i < len; ++i) {
3201 if (i)
3202 fprintf(out, ", ");
3203 if (parens)
3204 fprintf(out, "(%s%d)", prefix, i);
3205 else
3206 fprintf(out, "%s%d", prefix, i);
3208 fprintf(out, ")");
3211 /* Print an access to the element in the global memory copy of the
3212 * given array that corresponds to element [a0][a1]... of the original array.
3213 * The copy in global memory has been linearized, so we need to take
3214 * the array size into account.
3216 static void print_global_index(isl_ctx *ctx, FILE *out,
3217 struct cuda_array_info *array)
3219 int i;
3220 isl_printer *prn;
3222 fprintf(out, "%s[", array->name);
3223 for (i = 0; i + 1 < array->n_index; ++i)
3224 fprintf(out, "(");
3225 for (i = 0; i < array->n_index; ++i) {
3226 if (i) {
3227 prn = isl_printer_to_file(ctx, out);
3228 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3229 prn = isl_printer_print_str(prn, ") * (");
3230 prn = isl_printer_print_pw_qpolynomial_fold(prn,
3231 array->local_bound[i]);
3232 prn = isl_printer_print_str(prn, ") + ");
3233 isl_printer_free(prn);
3235 fprintf(out, "a%d", i);
3237 fprintf(out, "]");
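/* For illustration, a hypothetical example: for a two-dimensional
 * array A whose (localized) bound in the second dimension is N, the
 * printed access is
 *
 *	A[(a0) * (N) + a1]
 */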
3240 /* Print an access to the element in the shared memory copy of the
3241 * given array that corresponds to element [a0][a1]... of the original array.
3242 * Since the array in shared memory is just a shifted copy of part
3243 * of the original array, we simply need to subtract the lower bound,
3244 * which was computed in can_tile_for_shared_memory.
3245 * If any of the indices is strided, then we first add
3246 * shared_bound[i].shift and divide by shared_bound[i].stride.
3248 static void print_local_index(FILE *out, struct cuda_array_ref_group *group)
3250 int i;
3251 isl_ctx *ctx;
3252 isl_printer *prn;
3253 struct cuda_array_bound *bounds = group->shared_bound;
3255 ctx = isl_dim_get_ctx(group->array->dim);
3256 print_array_name(out, group);
3257 for (i = 0; i < group->array->n_index; ++i) {
3258 fprintf(out, "[(a%d", i);
3259 if (bounds[i].shift) {
3260 fprintf(out, " + (");
3261 prn = isl_printer_to_file(ctx, out);
3262 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3263 prn = isl_printer_print_qpolynomial(prn,
3264 bounds[i].shift);
3265 prn = isl_printer_print_str(prn, "))/");
3266 prn = isl_printer_print_isl_int(prn,
3267 bounds[i].stride);
3268 isl_printer_free(prn);
3269 } else
3270 fprintf(out, ")");
3271 fprintf(out, " - (");
3272 prn = isl_printer_to_file(ctx, out);
3273 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3274 prn = isl_printer_print_qpolynomial(prn, bounds[i].lb);
3275 isl_printer_free(prn);
3276 fprintf(out, ")]");
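/* For illustration, a hypothetical example: for a two-dimensional
 * shared tile with lower bounds L0 and L1 and no strides, the printed
 * access is
 *
 *	shared_A[(a0) - (L0)][(a1) - (L1)]
 *
 * With a stride of 4 and shift s in the first dimension, the first
 * index becomes [(a0 + (s))/4 - (L0)] instead.
 */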
3280 /* Print '#define's for copying data from global memory to shared
3281 * memory and back for the given array.
3283 static void print_array_copy_defines(struct cuda_gen *gen,
3284 struct cuda_array_ref_group *group)
3286 int i;
3287 const char *type[] = { "read", "write" };
3288 struct cuda_array_info *array = group->array;
3289 int n_index = array->n_index;
3291 for (i = 0; i < 2; ++i) {
3292 fprintf(gen->cuda.kernel_c, "#define %s_", type[i]);
3293 print_array_name(gen->cuda.kernel_c, group);
3294 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 0);
3295 fprintf(gen->cuda.kernel_c, " %s_", type[i]);
3296 print_array_name(gen->cuda.kernel_c, group);
3297 fprintf(gen->cuda.kernel_c, "_");
3298 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 1);
3299 fprintf(gen->cuda.kernel_c, "\n");
3301 fprintf(gen->cuda.kernel_c, "#define %s_", type[i]);
3302 print_array_name(gen->cuda.kernel_c, group);
3303 fprintf(gen->cuda.kernel_c, "_");
3304 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 0);
3305 if (i) {
3306 fprintf(gen->cuda.kernel_c, " ");
3307 print_global_index(gen->ctx, gen->cuda.kernel_c, array);
3308 fprintf(gen->cuda.kernel_c, " = ");
3309 print_local_index(gen->cuda.kernel_c, group);
3310 } else {
3311 fprintf(gen->cuda.kernel_c, " ");
3312 print_local_index(gen->cuda.kernel_c, group);
3313 fprintf(gen->cuda.kernel_c, " = ");
3314 print_global_index(gen->ctx, gen->cuda.kernel_c, array);
3316 fprintf(gen->cuda.kernel_c, "\n");
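/* For illustration, a hypothetical example: for a group of a
 * two-dimensional array A mapped to shared memory, the printed macros
 * take the form
 *
 *	#define read_shared_A(a0, a1) read_shared_A_((a0), (a1))
 *	#define read_shared_A_(a0, a1) shared_A[...] = A[...]
 *	#define write_shared_A(a0, a1) write_shared_A_((a0), (a1))
 *	#define write_shared_A_(a0, a1) A[...] = shared_A[...]
 *
 * where the elided indices are those printed by print_local_index and
 * print_global_index above.
 */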
3320 static void print_copy_defines(struct cuda_gen *gen)
3322 int i, j;
3324 for (i = 0; i < gen->n_array; ++i) {
3325 struct cuda_array_info *array = &gen->array[i];
3327 for (j = 0; j < array->n_group; ++j) {
3328 if (array->groups[j]->private_bound)
3329 continue;
3330 if (!array->groups[j]->shared_bound)
3331 continue;
3332 print_array_copy_defines(gen, array->groups[j]);
3337 /* The sizes of the arrays on the host that have been computed by
3338 * extract_array_info may depend on the parameters. Use the extra
3339 * constraints on the parameters that are valid at "host_domain"
3340 * to simplify these expressions.
3342 static void localize_bounds(struct cuda_gen *gen,
3343 __isl_keep isl_set *host_domain)
3345 int i, j;
3346 isl_set *context;
3347 unsigned nvar;
3349 context = isl_set_copy(host_domain);
3350 nvar = isl_set_dim(host_domain, isl_dim_set);
3351 context = isl_set_project_out(context, isl_dim_set, 0, nvar);
3353 for (i = 0; i < gen->n_array; ++i) {
3354 struct cuda_array_info *array = &gen->array[i];
3356 if (array->n_group == 0)
3357 continue;
3359 for (j = 0; j < array->n_index; ++j) {
3360 isl_pw_qpolynomial_fold *pwf;
3362 pwf = isl_pw_qpolynomial_fold_copy(array->bound[j]);
3363 pwf = isl_pw_qpolynomial_fold_gist(pwf,
3364 isl_set_copy(context));
3365 array->local_bound[j] = pwf;
3368 isl_set_free(context);
3371 /* Set gen->tile_len and gen->n_parallel to those of the first statement
3372 * in the statement list u.
3373 * Because of the way the schedule is constructed, the other statements
3374 * in the list, if any, should have the same values for these properties.
3376 static void set_tile_len(struct cuda_gen *gen, struct clast_user_stmt *u)
3378 int nr;
3379 struct cuda_stmt *stmt;
3381 nr = atoi(u->statement->name + 2);
3382 stmt = &gen->stmts[nr];
3384 gen->tile_len = stmt->tile_len;
3385 gen->n_parallel = stmt->n_parallel;
3388 /* This function is called for each leaf in the clast of the host code.
3389 * We first specialize the schedule to the site of the leaf, compute
3390 * the size of shared memory and then print the body of host code
3391 * and the associated kernel (through a call to print_kernel_body).
3393 static void print_host_user(struct gpucode_info *code,
3394 struct clast_user_stmt *u)
3396 struct cuda_gen *gen = code->user;
3397 isl_dim *dim;
3398 isl_set *par;
3399 isl_set *host_domain;
3400 isl_union_map *access;
3401 isl_union_map *local_sched;
3402 isl_union_set *arrays;
3404 set_tile_len(gen, u);
3405 read_sizes(gen);
3407 host_domain = extract_entire_host_domain(u);
3409 local_sched = isl_union_map_intersect_range(
3410 isl_union_map_copy(gen->sched),
3411 isl_union_set_from_set(extend(isl_set_copy(host_domain),
3412 gen->untiled_len)));
3413 access = isl_union_map_union(isl_union_map_copy(gen->read),
3414 isl_union_map_copy(gen->write));
3415 access = isl_union_map_apply_domain(access,
3416 isl_union_map_copy(local_sched));
3417 arrays = isl_union_map_range(access);
3419 print_indent(code->dst, code->indent);
3420 fprintf(code->dst, "dim3 k%d_dimBlock(", gen->kernel_id);
3421 print_reverse_list(code->dst, gen->n_block, gen->block_dim);
3422 fprintf(code->dst, ");\n");
3424 print_indent(code->dst, code->indent);
3425 fprintf(code->dst, "dim3 k%d_dimGrid(", gen->kernel_id);
3426 print_reverse_list(code->dst, gen->n_grid, gen->grid_dim);
3427 fprintf(code->dst, ");\n");
3429 gen->tiled_sched = tile_schedule(gen, local_sched);
3430 gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
3431 gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);
3433 gen->local_sched = isl_union_map_copy(gen->tiled_sched);
3435 dim = isl_union_map_get_dim(gen->local_sched);
3436 par = parametrization(dim, gen->tiled_len, 0, gen->shared_len, "g");
3437 gen->local_sched = isl_union_map_intersect_range(gen->local_sched,
3438 isl_union_set_from_set(par));
3440 gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
3441 gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);
3443 gen->private_access = NULL;
3444 compute_shared_sched(gen);
3445 gen->privatization = compute_privatization(gen);
3446 group_references(gen);
3447 compute_private_size(gen);
3448 localize_bounds(gen, host_domain);
3450 gen->local_sched = interchange_for_unroll(gen, gen->local_sched);
3452 print_copy_defines(gen);
3453 print_kernel_launch(gen, arrays);
3455 fprintf(gen->cuda.kernel_c, "{\n");
3457 print_kernel_body(gen, host_domain, gen->tiled_sched);
3459 fprintf(gen->cuda.kernel_c, "}\n");
3461 free_local_array_info(gen);
3462 isl_map_free(gen->privatization);
3463 isl_union_map_free(gen->private_access);
3464 isl_union_map_free(gen->local_sched);
3465 isl_union_map_free(gen->tiled_sched);
3466 isl_union_map_free(gen->shared_sched);
3467 isl_union_map_free(gen->shared_proj);
3468 isl_union_set_free(arrays);
3469 isl_set_free(host_domain);
3471 free(gen->tile_size);
3472 gen->kernel_id++;
3475 /* Use CLooG to generate code for the outer gen->tile_first loops
3476 * of the global schedule in gen->sched.
3477 * The pretty printing of this code is handled by gpu_print_host_stmt,
3478 * which calls print_host_user for each kernel invocation location.
3480 static void print_cloog_host_code(struct cuda_gen *gen)
3482 int i;
3483 isl_set *context;
3484 isl_union_map *sched;
3485 CloogOptions *options;
3486 CloogDomain *cloog_context;
3487 CloogUnionDomain *ud;
3488 CloogInput *input;
3489 struct clast_stmt *stmt;
3490 char name[20];
3492 options = cloog_options_malloc(gen->state);
3493 options->language = LANGUAGE_C;
3494 options->otl = 0;
3495 options->strides = 1;
3496 options->stop = gen->tile_first;
3497 options->f = gen->untiled_len;
3498 options->l = gen->untiled_len;
3499 options->save_domains = 1;
3500 options->noscalars = 1;
3502 sched = isl_union_map_copy(gen->sched);
3503 ud = cloog_union_domain_from_isl_union_map(sched);
3504 for (i = 0; i < options->stop; ++i) {
3505 snprintf(name, sizeof(name), "h%d", i);
3506 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
3508 context = isl_set_copy(gen->context);
3509 cloog_context = cloog_domain_from_isl_set(context);
3510 input = cloog_input_alloc(cloog_context, ud);
3512 stmt = cloog_clast_create_from_input(input, options);
3514 gen->code.indent = 0;
3515 gen->code.dst = gen->cuda.host_c;
3516 gen->code.print_user_stmt = NULL;
3517 gen->code.print_user_stmt_list = &print_host_user;
3518 gen->code.print_for_head = NULL;
3519 gen->code.print_for_foot = NULL;
3520 gen->code.user = gen;
3521 gpu_print_host_stmt(&gen->code, stmt);
3523 cloog_clast_free(stmt);
3524 cloog_options_free(options);
3527 void print_host_code(struct cuda_gen *gen)
3529 fprintf(gen->cuda.host_c, "{\n");
3530 print_cloog_macros(gen->cuda.host_c);
3531 print_cloog_macros(gen->cuda.kernel_c);
3533 declare_device_arrays(gen);
3535 allocate_device_arrays(gen);
3536 copy_arrays_to_device(gen);
3538 gen->kernel_id = 0;
3539 print_cloog_host_code(gen);
3541 copy_arrays_from_device(gen);
3542 free_device_arrays(gen);
3544 fprintf(gen->cuda.host_c, "}\n");
3547 __isl_give isl_set *add_context_from_str(__isl_take isl_set *set,
3548 const char *str)
3550 isl_ctx *ctx;
3551 isl_set *context;
3553 if (!str)
3554 return set;
3556 ctx = isl_set_get_ctx(set);
3557 context = isl_set_read_from_str(ctx, str, -1);
3558 context = isl_set_align_params(context, isl_set_get_dim(set));
3559 set = isl_set_intersect(set, context);
3561 return set;
3564 /* Convert scop->context to an isl_set.
3566 static __isl_give isl_set *extract_context(isl_ctx *ctx, scoplib_scop_p scop)
3568 isl_dim *dim;
3570 dim = isl_dim_set_alloc(ctx, scop->nb_parameters, 0);
3571 dim = set_dim_names(dim, isl_dim_param, scop->parameters);
3572 return scoplib_matrix_to_isl_set(scop->context, dim);
3575 /* Return an array of cuda_stmt representing the statements in "scop".
3577 static struct cuda_stmt *extract_stmts(isl_ctx *ctx, scoplib_scop_p scop,
3578 __isl_keep isl_set *context)
3580 int n;
3581 struct cuda_stmt *stmts;
3582 scoplib_statement_p stmt = scop->statement;
3584 n = scoplib_statement_number(scop->statement);
3585 stmts = isl_calloc_array(ctx, struct cuda_stmt, n);
3586 assert(stmts);
3588 for (stmt = scop->statement, n = 0; stmt; stmt = stmt->next, n++) {
3589 char name[20];
3590 isl_dim *dim;
3591 struct cuda_stmt *s = &stmts[n];
3593 snprintf(name, sizeof(name), "S_%d", n);
3595 dim = isl_dim_set_alloc(ctx, scop->nb_parameters,
3596 stmt->nb_iterators);
3597 dim = set_dim_names(dim, isl_dim_param, scop->parameters);
3598 dim = set_dim_names(dim, isl_dim_set, stmt->iterators);
3599 dim = isl_dim_set_tuple_name(dim, isl_dim_set, name);
3601 s->domain = scoplib_matrix_list_to_isl_set(stmt->domain,
3602 dim);
3603 s->domain = isl_set_intersect(s->domain, isl_set_copy(context));
3604 s->text = strdup(stmt->body);
3605 stmt_extract_accesses(s);
3608 return stmts;
3611 /* Extract all the read and write accesses from "scop" and store
3612 * them in gen->read and gen->write.
3614 static void extract_accesses(struct cuda_gen *gen, scoplib_scop_p scop)
3616 int i;
3617 int n = scoplib_statement_number(scop->statement);
3618 isl_dim *dim;
3619 scoplib_statement_p stmt;
3621 dim = isl_set_get_dim(gen->context);
3622 gen->write = isl_union_map_empty(isl_dim_copy(dim));
3623 gen->read = isl_union_map_empty(dim);
3625 for (i = 0, stmt = scop->statement; i < n; ++i, stmt = stmt->next) {
3626 isl_union_map *read_i;
3627 isl_union_map *write_i;
3629 read_i = scoplib_access_to_isl_union_map(stmt->read,
3630 isl_set_copy(gen->stmts[i].domain),
3631 scop->arrays);
3632 write_i = scoplib_access_to_isl_union_map(stmt->write,
3633 isl_set_copy(gen->stmts[i].domain),
3634 scop->arrays);
3636 gen->read = isl_union_map_union(gen->read, read_i);
3637 gen->write = isl_union_map_union(gen->write, write_i);
3641 /* Extract and return the original schedule of the program from "scop".
3643 static isl_union_map *extract_original_schedule(struct cuda_gen *gen,
3644 scoplib_scop_p scop)
3646 int i;
3647 int n = scoplib_statement_number(scop->statement);
3648 isl_dim *dim;
3649 isl_union_map *sched;
3650 scoplib_statement_p stmt;
3652 dim = isl_set_get_dim(gen->context);
3653 sched = isl_union_map_empty(dim);
3655 for (i = 0, stmt = scop->statement; i < n; ++i, stmt = stmt->next) {
3656 isl_map *sched_i;
3658 dim = isl_set_get_dim(gen->stmts[i].domain);
3659 dim = isl_dim_from_domain(dim);
3660 dim = isl_dim_add(dim, isl_dim_out, 2 * stmt->nb_iterators + 1);
3661 sched_i = scoplib_schedule_to_isl_map(stmt->schedule, dim);
3663 sched = isl_union_map_union(sched,
3664 isl_union_map_from_map(sched_i));
3667 return sched;
3670 /* Return the union of the iteration domains of all gen->stmts[i].
3672 static __isl_give isl_union_set *extract_domain(struct cuda_gen *gen)
3674 int i;
3675 isl_union_set *domain;
3677 domain = isl_union_set_empty(isl_set_get_dim(gen->context));
3678 for (i = 0; i < gen->n_stmts; ++i) {
3679 isl_set *domain_i;
3681 domain_i = isl_set_copy(gen->stmts[i].domain);
3682 domain = isl_union_set_union(domain,
3683 isl_union_set_from_set(domain_i));
3686 return domain;
3689 /* Information about the outermost tilable bands in the forest of bands.
3691 * tile_len and n_parallel are only set on band_info structures
3692 * that correspond to outermost bands. For other bands (in particular,
3693 * ancestors of the outermost bands), n_parallel is set to 0.
3695 * prefix is the (padded) schedule leading up to the outermost tilable bands.
3697 * tile_first is the number of schedule dimensions in prefix.
3699 * suffix is the schedule of the outermost tilable bands and their descendants.
3701 struct band_info {
3702 struct cuda_gen *gen;
3703 int tile_first;
3704 int tile_len;
3705 int n_parallel;
3706 isl_union_map *prefix;
3707 isl_union_map *suffix;
3710 /* Set tile_len and n_parallel of the statement to that of
3711 * their outermost band, recorded in the band_info.
3713 static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
3715 struct band_info *info = user;
3716 int nr;
3717 struct cuda_stmt *stmt;
3719 nr = atoi(isl_map_get_tuple_name(map, isl_dim_in) + 2);
3720 stmt = &info->gen->stmts[nr];
3722 stmt->tile_len = info->tile_len;
3723 stmt->n_parallel = info->n_parallel;
3725 isl_map_free(map);
3727 return 0;
3730 static void list_select_outer_band(struct cuda_gen *gen,
3731 __isl_take isl_band_list *list, int pos, struct band_info *list_info);
3733 /* Check if this band has any parallel loops. If so, take it as
3734 * the outermost tilable band. If not, continue looking for the
3735 * outermost tilable band in the children of the current band.
3737 static void band_select_outer_band(struct cuda_gen *gen,
3738 __isl_take isl_band *band, int pos, struct band_info *info)
3740 int n = isl_band_n_member(band);
3741 int n_parallel;
3743 for (n_parallel = 0; n_parallel < n; ++n_parallel)
3744 if (!isl_band_member_is_zero_distance(band, n_parallel))
3745 break;
3747 info->n_parallel = n_parallel;
3748 if (n_parallel) {
3749 info->gen = gen;
3750 info->tile_first = pos;
3751 info->tile_len = n;
3752 info->prefix = isl_band_get_prefix_schedule(band);
3753 info->suffix = isl_union_map_flat_range_product(
3754 isl_band_get_partial_schedule(band),
3755 isl_band_get_suffix_schedule(band));
3756 isl_union_map_foreach_map(info->prefix,
3757 &set_stmt_tile_len, info);
3758 } else {
3759 isl_band_list *children;
3760 assert(isl_band_has_children(band));
3761 children = isl_band_get_children(band);
3762 list_select_outer_band(gen, children, pos + n, info);
3765 isl_band_free(band);
3768 /* Comparison function that returns a non-zero value for band_infos
3769 * with different tile_len fields or different n_parallel fields.
3771 static int cmp_band(const void *p1, const void *p2)
3773 const struct band_info *info1 = p1;
3774 const struct band_info *info2 = p2;
3776 if (info1->tile_len != info2->tile_len)
3777 return info1->tile_len - info2->tile_len;
3779 return info1->n_parallel - info2->n_parallel;
3782 /* Extend the range of "umap" with coordinates of fixed value "val"
3783 * to a total length of "dst_len", assuming the original range dimension is "src_len".
3785 static __isl_give isl_union_map *extend_range(__isl_take isl_union_map *umap,
3786 int src_len, int dst_len, int val)
3788 isl_dim *dim;
3789 isl_map *map;
3790 int i;
3792 dim = isl_union_map_get_dim(umap);
3793 map = isl_map_reverse(projection(dim, dst_len, src_len));
3794 for (i = src_len; i < dst_len; ++i)
3795 map = isl_map_fix_si(map, isl_dim_out, i, val);
3797 umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));
3799 return umap;
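/* As a purely illustrative example, extending { S_0[i] -> [i] } with
 * src_len 1, dst_len 3 and val 0 yields { S_0[i] -> [i, 0, 0] }.
 */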
3802 /* Group bands with the same values for tile_len and n_parallel.
3803 * The prefix schedule is then extended with a fixed coordinate that
3804 * is different for each such group.
3805 * Note that the actual values for this coordinate are not important.
3806 * The bands have already been effectively separated at a higher level
3807 * or they are independent and may be executed in parallel.
3808 * The list of band_info has been sorted before this function is called.
3810 static void separate_bands(struct band_info *info, int n)
3812 int i;
3813 int j = 0;
3815 for (i = 0; i < n; ++i) {
3816 int l = info[i].tile_first;
3818 if (i &&
3819 (info[i].tile_len != info[i - 1].tile_len ||
3820 info[i].n_parallel != info[i - 1].n_parallel))
3821 j++;
3823 info[i].prefix = extend_range(info[i].prefix,
3824 l, l + 1, j);
3825 info[i].tile_first = l + 1;
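/* As a purely illustrative example, three sorted band_infos with
 * (tile_len, n_parallel) equal to (2, 1), (2, 1) and (3, 2) would have
 * their prefix schedules extended with the coordinate values 0, 0 and 1,
 * keeping the first two together and separating them from the third.
 */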
3829 /* Select the outermost bands in the elements of the list, align
3830 * their prefix schedules, separate bands with different values
3831 * for tile_len and/or n_parallel and then combine the resulting
3832 * prefix and suffix schedules into a single pair of prefix and
3833 * suffix schedules for the entire list.
3835 static void list_select_outer_band(struct cuda_gen *gen,
3836 __isl_take isl_band_list *list, int pos, struct band_info *list_info)
3838 isl_band *band;
3839 int i;
3840 int n = isl_band_list_n_band(list);
3841 isl_ctx *ctx = isl_band_list_get_ctx(list);
3842 struct band_info *info;
3843 int max_tile_first;
3844 isl_union_map *prefix;
3845 isl_union_map *suffix;
3847 assert(n >= 1);
3848 info = isl_calloc_array(ctx, struct band_info, n);
3849 assert(info);
3851 max_tile_first = 0;
3852 for (i = 0; i < n; ++i) {
3853 band = isl_band_list_get_band(list, i);
3854 band_select_outer_band(gen, band, pos, &info[i]);
3855 if (info[i].tile_first > max_tile_first)
3856 max_tile_first = info[i].tile_first;
3859 for (i = 0; i < n; ++i) {
3860 if (info[i].tile_first == max_tile_first)
3861 continue;
3862 info[i].prefix = extend_range(info[i].prefix,
3863 info[i].tile_first, max_tile_first, 0);
3866 qsort(info, n, sizeof(struct band_info), &cmp_band);
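	/* After sorting, bands with identical tile_len and n_parallel are
	 * adjacent; only separate if at least two distinct groups remain.
	 */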
3868 for (i = 0; i < n - 1; ++i)
3869 if (info[i].tile_len != info[i + 1].tile_len ||
3870 info[i].n_parallel != info[i + 1].n_parallel)
3871 break;
3873 if (i < n - 1)
3874 separate_bands(info, n);
3876 prefix = info[0].prefix;
3877 suffix = info[0].suffix;
3879 for (i = 1; i < n; ++i) {
3880 prefix = isl_union_map_union(prefix, info[i].prefix);
3881 suffix = isl_union_map_union(suffix, info[i].suffix);
3884 list_info->tile_first = info[0].tile_first;
3885 list_info->tile_len = -1;
3886 list_info->prefix = prefix;
3887 list_info->suffix = suffix;
3889 isl_band_list_free(list);
3890 free(info);
3893 /* Set max_out to the maximal number of output dimensions over
3894 * all maps.
3896 static int update_max_out(__isl_take isl_map *map, void *user)
3898 int *max_out = user;
3899 int n_out = isl_map_dim(map, isl_dim_out);
3901 if (n_out > *max_out)
3902 *max_out = n_out;
3904 isl_map_free(map);
3905 return 0;
3908 struct align_range_data {
3909 int max_out;
3910 isl_union_map *res;
3913 /* Extend the dimension of the range of the given map to data->max_out and
3914 * then add the result to data->res.
3916 static int map_align_range(__isl_take isl_map *map, void *user)
3918 struct align_range_data *data = user;
3919 int i;
3920 isl_dim *dim;
3921 isl_map *proj;
3922 int n_out = isl_map_dim(map, isl_dim_out);
3924 dim = isl_union_map_get_dim(data->res);
3925 proj = isl_map_reverse(projection(dim, data->max_out, n_out));
3926 for (i = n_out; i < data->max_out; ++i)
3927 proj = isl_map_fix_si(proj, isl_dim_out, i, 0);
3929 map = isl_map_apply_range(map, proj);
3931 data->res = isl_union_map_add_map(data->res, map);
3933 return 0;
3936 /* Extend the ranges of the maps in the union map such that they all have
3937 * the same dimension.
3939 static __isl_give isl_union_map *align_range(__isl_take isl_union_map *umap)
3941 struct align_range_data data;
3943 data.max_out = 0;
3944 isl_union_map_foreach_map(umap, &update_max_out, &data.max_out);
3946 data.res = isl_union_map_empty(isl_union_map_get_dim(umap));
3947 isl_union_map_foreach_map(umap, &map_align_range, &data);
3949 isl_union_map_free(umap);
3950 return data.res;
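/* As a purely illustrative example, align_range would turn
 *
 *	{ S_0[] -> [0]; S_1[i] -> [1, i] }
 *
 * into { S_0[] -> [0, 0]; S_1[i] -> [1, i] }.
 */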
3953 /* Select the outermost tilable band that (by construction)
3954 * has at least one parallel loop.
3955 * The starting position of the aligned band is stored in
3956 * gen->tile_first.
3957 * The sizes and number of parallel loops may be different in different
3958 * parts of the band forest and are therefore stored in the cuda_stmts.
3960 * Return the complete schedule, with the tilable bands aligned
3961 * at gen->tile_first and padded with zero, if needed.
3963 static __isl_give isl_union_map *select_outer_tilable_band(struct cuda_gen *gen,
3964 __isl_keep isl_schedule *schedule)
3966 isl_band_list *list;
3967 struct band_info info;
3969 gen->n_parallel = 0;
3970 gen->tile_len = -1;
3972 list = isl_schedule_get_band_forest(schedule);
3974 list_select_outer_band(gen, list, 0, &info);
3976 gen->tile_first = info.tile_first;
3977 info.suffix = align_range(info.suffix);
3979 return isl_union_map_flat_range_product(info.prefix, info.suffix);
3982 /* Set gen->untiled_len to the number of scheduling dimensions
3983 * for the schedule of the first domain.
3984 * We assume here that this number is the same for all domains.
3986 static int set_untiled_len(__isl_take isl_map *map, void *user)
3988 unsigned *untiled_len = user;
3990 *untiled_len = isl_map_dim(map, isl_dim_out);
3992 isl_map_free(map);
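	/* A negative return value stops the isl_union_map_foreach_map
	 * iteration after this first map.
	 */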
3993 return -1;
3996 /* Compute an appropriate schedule based on the accesses in
3997 * gen->read and gen->write.
3999 * We first compute dependences and then use those to compute
4000 * a schedule that has a parallel loop in each tilable band.
4001 * Finally, we select the outermost tilable band.
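 *
 * As a purely illustrative example, if S_0 writes a[i] and S_1 later
 * reads a[i], the flow dependence { S_0[i] -> S_1[i] } computed here
 * forces the schedule to execute S_0[i] before S_1[i].
 */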
4003 static void compute_schedule(struct cuda_gen *gen,
4004 __isl_take isl_union_map *sched)
4006 isl_ctx *ctx = isl_union_map_get_ctx(sched);
4007 isl_union_set *domain;
4008 isl_union_map *empty;
4009 isl_union_map *dep_raw, *dep2, *dep3, *dep;
4010 isl_union_map *uninitialized;
4011 isl_schedule *schedule;
4012 struct isl_options *options;
4014 empty = isl_union_map_empty(isl_union_map_get_dim(sched));
4016 isl_union_map_compute_flow(isl_union_map_copy(gen->read),
4017 isl_union_map_copy(gen->write), empty,
4018 isl_union_map_copy(sched),
4019 &dep_raw, NULL, &uninitialized, NULL);
4020 isl_union_map_compute_flow(isl_union_map_copy(gen->write),
4021 isl_union_map_copy(gen->write),
4022 isl_union_map_copy(gen->read),
4023 isl_union_map_copy(sched),
4024 &dep2, &dep3, NULL, NULL);
4025 isl_union_map_free(sched);
4027 gen->copy_in = isl_union_map_range(uninitialized);
4029 dep = isl_union_map_union(dep2, dep3);
4030 dep = isl_union_map_union(dep, dep_raw);
4031 dep = isl_union_map_coalesce(dep);
4033 domain = extract_domain(gen);
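	/* Request schedule bands with outer zero-distance (i.e., parallel)
	 * dimensions whenever possible.
	 */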
4034 options = isl_ctx_peek_options(ctx, isl_options_arg);
4035 options->schedule_outer_zero_distance = 1;
4036 schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
4037 isl_union_map_copy(dep), dep);
4039 sched = select_outer_tilable_band(gen, schedule);
4041 isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
4042 sched = isl_union_map_intersect_domain(sched, domain);
4043 gen->sched = sched;
4045 isl_schedule_free(schedule);
4048 /* Replace the scop in the "input" file by equivalent code
4049 * that uses the GPU. "scop" is assumed to correspond to this scop.
4051 * We first compute a schedule that respects the dependences
4052 * of the original program and select the outermost band
4053 * of tilable dimensions that has at least one parallel loop.
4054 * We then have three blocks of dimensions
4056 * H B G
4058 * The tilable band "B" is first tiled according to "tile.sizes", resulting
4059 * in
4061 * H T P G
4063 * For each iteration of the T loop and for each array, we compute
4064 * the set of array elements accessed by that iteration, construct a
4065 * rectangular box around that set, and shift the box to the origin.
4066 * The result is used as shared memory for the array.
4068 * We then split off at most 2 parallel loops from the T loops and
4069 * at most 3 parallel loops from the P loops
4071 * H T1 T2 P1 P2 G
4073 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
4074 * according to "grid.sizes"/"block.sizes".
4076 * H T1T T1P T2 P1T P1P P2 G
4078 * Finally, the T1P and P1P iterators are equated to the block and
4079 * thread dimensions respectively and so are effectively removed.
4080 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
4081 * are run on the GPU.
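 *
 * As a purely illustrative sketch (not the generated code), ignoring
 * boundary conditions, a single parallel loop of N iterations with
 * tile size 32, 16 blocks and 32 threads per block would roughly become
 *
 *	for (t = b0; t < N / 32; t += 16)	// T1T: tiles wrapped over blocks
 *		S(32 * t + t0);			// P1P equated to thread t0
 *
 * where b0 and t0 denote the block and thread identifiers.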
4083 * Code is generated in three stages. We first generate code for the
4084 * host (the H loops), with iterators h%d. Then, for each leaf node
4085 * of the resulting AST, we generate code for the shared loops (up to
4086 * and including T2), with iterators g%d and after equating the H loops
4087 * to h%d parameters and the T1P loops to the block dimensions.
4088 * Finally, we generate code for the remaining loops in a similar fashion.
4090 * The function frees "scop" and "ctx".
4092 int cuda_scop(isl_ctx *ctx, scoplib_scop_p scop, struct ppcg_options *options,
4093 const char *input)
4095 isl_union_map *sched;
4096 struct cuda_gen gen;
4098 gen.ctx = ctx;
4099 gen.context = extract_context(gen.ctx, scop);
4100 gen.context = add_context_from_str(gen.context, options->ctx);
4101 gen.n_stmts = scoplib_statement_number(scop->statement);
4102 gen.stmts = extract_stmts(gen.ctx, scop, gen.context);
4103 extract_accesses(&gen, scop);
4104 gen.options = options;
4105 gen.state = cloog_isl_state_malloc(gen.ctx);
4107 cuda_open_files(&gen.cuda, input);
4109 collect_array_info(&gen);
4111 sched = extract_original_schedule(&gen, scop);
4112 compute_schedule(&gen, sched);
4114 print_host_code(&gen);
4116 cloog_state_free(gen.state);
4117 clear_cuda_gen(&gen);
4118 isl_ctx_free(gen.ctx);
4119 scoplib_scop_free(scop);
4121 cuda_close_files(&gen.cuda);
4123 return 0;