1 /*
2 * Copyright 2010-2011 INRIA Saclay
4 * Use of this software is governed by the GNU LGPLv2.1 license
6 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
7 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
8 * 91893 Orsay, France
9 */
11 #include <assert.h>
12 #include <stdlib.h>
14 #include <isl/polynomial.h>
15 #include <isl/union_set.h>
16 #include <isl/aff.h>
17 #include <isl/ilp.h>
18 #include <isl/flow.h>
19 #include <isl/band.h>
20 #include <isl/schedule.h>
21 #include <isl/options.h>
22 #include <cloog/isl/cloog.h>
24 #include "cuda.h"
25 #include "cuda_common.h"
26 #include "gpucode.h"
27 #include "schedule.h"
28 #include "ppcg_options.h"
30 /* The fields stride, shift and shift_map only contain valid information
31 * if shift != NULL.
32 * If so, they express that the current index is such that if you add shift,
33 * then the result is always a multiple of stride.
34 * shift_map contains the mapping
36 * i -> (i + shift)/stride
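 *
 * As an illustration (hypothetical values, not taken from the code): with
 * stride = 4 and shift = 3, the accessed indices i = 1, 5, 9, ... satisfy
 * i + 3 = 4, 8, 12, ..., and shift_map sends them to (i + 3)/4 = 1, 2, 3, ...
 */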
38 struct cuda_array_bound {
39 isl_int size;
40 isl_aff *lb;
42 isl_int stride;
43 isl_qpolynomial *shift;
44 isl_basic_map *shift_map;
47 struct cuda_array_info;
49 /* A group of array references in a kernel that should be handled together.
50 * If private_bound is not NULL, then it is mapped to registers.
51 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
52 * Otherwise, it is accessed from global memory.
54 struct cuda_array_ref_group {
55 /* The references in this group access this array. */
56 struct cuda_array_info *array;
57 /* Position of this group in the list of reference groups of array. */
58 int nr;
60 /* The following fields are used during the construction of the groups.
61 * access is the combined access relation relative to the shared
62 * memory tiling.
63 * write is set if any access in the group is a write.
65 isl_map *access;
66 int write;
68 /* For each index, size and offset of piece in shared memory. */
69 struct cuda_array_bound *shared_bound;
71 /* For each index, size and offset of piece in private memory. */
72 struct cuda_array_bound *private_bound;
74 /* References in this group; point to elements of a linked list. */
75 int n_ref;
76 struct cuda_stmt_access **refs;
79 struct cuda_array_info {
80 isl_dim *dim;
81 /* Name of the array. */
82 char *name;
83 /* Number of indices. */
84 unsigned n_index;
85 /* For each index, a bound on the array in that direction. */
86 isl_pw_aff **bound;
87 /* For each index, bound[i] specialized to the current kernel. */
88 isl_pw_aff **local_bound;
90 /* All references to this array; point to elements of a linked list. */
91 int n_ref;
92 struct cuda_stmt_access **refs;
94 /* The reference groups associated with this array. */
95 int n_group;
96 struct cuda_array_ref_group **groups;
98 /* Last shared memory tile dimension that affects the tile of this array. */
99 int last_shared;
100 /* Dimension at which copying to/from shared memory is printed.
101 * if >= 0, then the value is >= last_shared
102 * if -1, then the copying is done at the leaf level.
104 int print_shared_level;
107 /* Print the name of the local copy of a given group of array references.
109 static void print_array_name(FILE *out, struct cuda_array_ref_group *group)
111 int global = 0;
113 if (group->private_bound)
114 fprintf(out, "private_");
115 else if (group->shared_bound)
116 fprintf(out, "shared_");
117 else
118 global = 1;
119 fprintf(out, "%s", group->array->name);
120 if (!global && group->array->n_group > 1)
121 fprintf(out, "_%d", group->nr);
124 /* Collect all references to the given array and store pointers to them
125 * in array->refs.
127 static void collect_references(struct cuda_gen *gen,
128 struct cuda_array_info *array)
130 int i;
131 int n;
133 n = 0;
134 for (i = 0; i < gen->n_stmts; ++i) {
135 struct cuda_stmt *stmt = &gen->stmts[i];
136 struct cuda_stmt_access *access;
138 for (access = stmt->accesses; access; access = access->next) {
139 const char *name;
140 name = isl_map_get_tuple_name(access->access,
141 isl_dim_out);
142 if (name && !strcmp(array->name, name))
143 n++;
147 array->n_ref = n;
148 array->refs = isl_alloc_array(gen->ctx, struct cuda_stmt_access *, n);
149 assert(array->refs);
151 n = 0;
152 for (i = 0; i < gen->n_stmts; ++i) {
153 struct cuda_stmt *stmt = &gen->stmts[i];
154 struct cuda_stmt_access *access;
156 for (access = stmt->accesses; access; access = access->next) {
157 const char *name;
158 name = isl_map_get_tuple_name(access->access,
159 isl_dim_out);
160 if (!name || strcmp(array->name, name))
161 continue;
163 array->refs[n++] = access;
168 static struct cuda_array_bound *create_bound_list(isl_ctx *ctx, int n_index)
170 int i;
171 struct cuda_array_bound *bound;
173 bound = isl_alloc_array(ctx, struct cuda_array_bound, n_index);
174 assert(bound);
176 for (i = 0; i < n_index; ++i) {
177 isl_int_init(bound[i].size);
178 bound[i].lb = NULL;
179 isl_int_init(bound[i].stride);
180 bound[i].shift = NULL;
181 bound[i].shift_map = NULL;
184 return bound;
187 static void free_bound_list(struct cuda_array_bound *bound, int n_index)
189 int j;
191 if (!bound)
192 return;
194 for (j = 0; j < n_index; ++j) {
195 isl_int_clear(bound[j].size);
196 isl_int_clear(bound[j].stride);
197 isl_aff_free(bound[j].lb);
198 isl_qpolynomial_free(bound[j].shift);
199 isl_basic_map_free(bound[j].shift_map);
201 free(bound);
204 /* Compute bounds on the host arrays based on the accessed elements
205 * and collect all references to the array.
207 static int extract_array_info(__isl_take isl_set *array, void *user)
209 int i;
210 struct cuda_gen *gen = (struct cuda_gen *)user;
211 const char *name;
212 int n_index;
213 isl_pw_aff **bounds;
214 isl_pw_aff **local_bounds;
216 n_index = isl_set_dim(array, isl_dim_set);
217 name = isl_set_get_tuple_name(array);
218 bounds = isl_alloc_array(isl_set_get_ctx(array),
219 isl_pw_aff *, n_index);
220 assert(bounds);
221 local_bounds = isl_calloc_array(isl_set_get_ctx(array),
222 isl_pw_aff *, n_index);
223 assert(local_bounds);
224 gen->array[gen->n_array].dim = isl_set_get_dim(array);
225 gen->array[gen->n_array].name = strdup(name);
226 gen->array[gen->n_array].n_index = n_index;
227 gen->array[gen->n_array].bound = bounds;
228 gen->array[gen->n_array].local_bound = local_bounds;
230 for (i = 0; i < n_index; ++i) {
231 isl_set *dom;
232 isl_local_space *ls;
233 isl_aff *one;
234 isl_pw_aff *bound;
236 bound = isl_set_dim_max(isl_set_copy(array), i);
237 dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
238 ls = isl_local_space_from_dim(isl_set_get_dim(dom));
239 one = isl_aff_zero(ls);
240 one = isl_aff_add_constant_si(one, 1);
241 bound = isl_pw_aff_add(bound, isl_pw_aff_alloc(dom, one));
242 bound = isl_pw_aff_gist(bound, isl_set_copy(gen->context));
244 bounds[i] = bound;
247 collect_references(gen, &gen->array[gen->n_array]);
249 gen->n_array++;
251 isl_set_free(array);
252 return 0;
255 void collect_array_info(struct cuda_gen *gen)
257 isl_union_set *arrays;
259 arrays = isl_union_map_range(isl_union_map_copy(gen->read));
260 arrays = isl_union_set_union(arrays,
261 isl_union_map_range(isl_union_map_copy(gen->write)));
262 arrays = isl_union_set_coalesce(arrays);
264 gen->n_array = isl_union_set_n_set(arrays);
265 gen->array = isl_alloc_array(gen->ctx,
266 struct cuda_array_info, gen->n_array);
267 assert(gen->array);
268 gen->n_array = 0;
269 isl_union_set_foreach_set(arrays, &extract_array_info, gen);
270 isl_union_set_free(arrays);
273 static void free_array_info(struct cuda_gen *gen)
275 int i, j;
277 for (i = 0; i < gen->n_array; ++i) {
278 int n_index = gen->array[i].n_index;
279 free(gen->array[i].name);
280 for (j = 0; j < n_index; ++j) {
281 isl_pw_aff_free(gen->array[i].bound[j]);
282 isl_pw_aff_free(gen->array[i].local_bound[j]);
284 isl_dim_free(gen->array[i].dim);
285 free(gen->array[i].bound);
286 free(gen->array[i].local_bound);
287 free(gen->array[i].refs);
289 free(gen->array);
292 static void declare_device_arrays(struct cuda_gen *gen)
294 int i;
296 for (i = 0; i < gen->n_array; ++i)
297 fprintf(gen->cuda.host_c, "%s *dev_%s;\n",
298 gen->options->type, gen->array[i].name);
301 static void print_array_size(struct cuda_gen *gen, FILE *out,
302 struct cuda_array_info *array)
304 int i;
305 isl_printer *prn;
307 prn = isl_printer_to_file(gen->ctx, out);
308 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
309 for (i = 0; i < array->n_index; ++i) {
310 prn = isl_printer_print_str(prn, "(");
311 prn = isl_printer_print_pw_aff(prn, array->bound[i]);
312 prn = isl_printer_print_str(prn, ") * ");
314 prn = isl_printer_print_str(prn, "sizeof(");
315 prn = isl_printer_print_str(prn, gen->options->type);
316 prn = isl_printer_print_str(prn, ")");
317 isl_printer_free(prn);
320 static void allocate_device_arrays(struct cuda_gen *gen)
322 int i;
324 for (i = 0; i < gen->n_array; ++i) {
325 fprintf(gen->cuda.host_c, "cudaMalloc(&dev_%s, ",
326 gen->array[i].name);
327 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
328 fprintf(gen->cuda.host_c, ");\n");
332 static void free_device_arrays(struct cuda_gen *gen)
334 int i;
336 for (i = 0; i < gen->n_array; ++i)
337 fprintf(gen->cuda.host_c, "cudaFree(dev_%s);\n",
338 gen->array[i].name);
341 static void copy_arrays_to_device(struct cuda_gen *gen)
343 int i;
345 for (i = 0; i < gen->n_array; ++i) {
346 isl_dim *dim;
347 isl_set *read_i;
348 int empty;
350 dim = isl_dim_copy(gen->array[i].dim);
351 read_i = isl_union_set_extract_set(gen->copy_in, dim);
352 empty = isl_set_fast_is_empty(read_i);
353 isl_set_free(read_i);
354 if (empty)
355 continue;
357 fprintf(gen->cuda.host_c, "assert(sizeof(%s) == ",
358 gen->array[i].name);
359 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
360 fprintf(gen->cuda.host_c, ");\n");
361 fprintf(gen->cuda.host_c, "cudaMemcpy(dev_%s, %s, ",
362 gen->array[i].name, gen->array[i].name);
363 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
364 fprintf(gen->cuda.host_c, ", cudaMemcpyHostToDevice);\n");
368 static void copy_arrays_from_device(struct cuda_gen *gen)
370 int i;
371 isl_union_set *write;
372 write = isl_union_map_range(isl_union_map_copy(gen->write));
374 for (i = 0; i < gen->n_array; ++i) {
375 isl_dim *dim;
376 isl_set *write_i;
377 int empty;
379 dim = isl_dim_copy(gen->array[i].dim);
380 write_i = isl_union_set_extract_set(write, dim);
381 empty = isl_set_fast_is_empty(write_i);
382 isl_set_free(write_i);
383 if (empty)
384 continue;
386 fprintf(gen->cuda.host_c, "cudaMemcpy(%s, dev_%s, ",
387 gen->array[i].name, gen->array[i].name);
388 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
389 fprintf(gen->cuda.host_c, ", cudaMemcpyDeviceToHost);\n");
392 isl_union_set_free(write);
395 static void read_sizes_from_file(struct cuda_gen *gen, const char *filename,
396 int *sizes, int len)
398 int i;
399 FILE *file;
401 file = fopen(filename, "r");
402 if (!file)
403 return;
405 for (i = 0; i < len; ++i)
406 if (fscanf(file, "%d", &sizes[i]) < 1)
407 break;
409 fclose(file);
412 static void reverse_list(int *list, int len)
414 int i;
415 int t;
417 for (i = 0; 2 * i < len; ++i) {
418 t = list[i];
419 list[i] = list[len - 1 - i];
420 list[len - 1 - i] = t;
424 /* Read user-specified sizes from "tile.sizes", "block.sizes" and "grid.sizes"
425 * after filling in some potentially useful defaults.
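 *
 * For instance (hypothetical file contents), a "tile.sizes" file containing
 * "32 32 16" overrides the default tile sizes for a three-dimensional tile,
 * and a "block.sizes" file containing "16 16" requests 16x16 thread blocks;
 * each file simply holds whitespace-separated integers.
 */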
427 static void read_sizes(struct cuda_gen *gen)
429 int n;
431 gen->tile_size = isl_alloc_array(gen->ctx, int, gen->tile_len);
432 assert(gen->tile_size);
433 for (n = 0; n < gen->tile_len; ++n)
434 gen->tile_size[n] = gen->options->tile_size;
435 read_sizes_from_file(gen, "tile.sizes", gen->tile_size, gen->tile_len);
437 n = gen->n_parallel;
438 gen->n_block = (n <= 3) ? n : 3;
439 switch (gen->n_block) {
440 case 1:
441 gen->block_dim[0] = 512;
442 break;
443 case 2:
444 gen->block_dim[0] = 32;
445 gen->block_dim[1] = 16;
446 break;
447 default:
448 gen->block_dim[0] = 32;
449 gen->block_dim[1] = 4;
450 gen->block_dim[2] = 4;
451 break;
453 read_sizes_from_file(gen, "block.sizes", gen->block_dim, gen->n_block);
454 reverse_list(gen->block_dim, gen->n_block);
456 gen->n_grid = (n <= 2) ? n : 2;
457 switch (gen->n_grid) {
458 case 1:
459 gen->grid_dim[0] = 65536;
460 break;
461 default:
462 gen->grid_dim[0] = 256;
463 gen->grid_dim[1] = 256;
464 break;
466 read_sizes_from_file(gen, "grid.sizes", gen->grid_dim, gen->n_grid);
467 reverse_list(gen->grid_dim, gen->n_grid);
470 static void free_stmts(struct cuda_stmt *stmts, int n)
472 int i;
474 for (i = 0; i < n; ++i) {
475 struct cuda_stmt_access *access, *next;
477 for (access = stmts[i].accesses; access; access = next) {
478 next = access->next;
479 isl_map_free(access->access);
480 free(access);
483 isl_set_free(stmts[i].domain);
485 free(stmts);
488 void clear_cuda_gen(struct cuda_gen *gen)
490 free_stmts(gen->stmts, gen->n_stmts);
491 free_array_info(gen);
492 isl_set_free(gen->context);
493 isl_union_set_free(gen->copy_in);
494 isl_union_map_free(gen->sched);
495 isl_union_map_free(gen->read);
496 isl_union_map_free(gen->write);
499 static void print_reverse_list(FILE *out, int len, int *list)
501 int i;
503 for (i = 0; i < len; ++i) {
504 if (i)
505 fprintf(out, ", ");
506 fprintf(out, "%d", list[len - 1 - i]);
510 static void print_kernel_launch(struct cuda_gen *gen,
511 __isl_keep isl_union_set *arrays)
513 int i;
514 int first = 1;
515 unsigned nparam;
516 isl_dim *dim;
518 print_indent(gen->code.dst, gen->code.indent);
519 fprintf(gen->code.dst, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
520 gen->kernel_id, gen->kernel_id, gen->kernel_id);
521 fprintf(gen->cuda.kernel_c, "__global__ void kernel%d(",
522 gen->kernel_id);
523 fprintf(gen->cuda.kernel_h, "__global__ void kernel%d(",
524 gen->kernel_id);
526 for (i = 0; i < gen->n_array; ++i) {
527 isl_dim *dim;
528 isl_set *arr;
529 int empty;
531 dim = isl_dim_copy(gen->array[i].dim);
532 arr = isl_union_set_extract_set(arrays, dim);
533 empty = isl_set_fast_is_empty(arr);
534 isl_set_free(arr);
535 if (empty)
536 continue;
538 if (!first) {
539 fprintf(gen->code.dst, ", ");
540 fprintf(gen->cuda.kernel_c, ", ");
541 fprintf(gen->cuda.kernel_h, ", ");
544 fprintf(gen->code.dst, "dev_%s", gen->array[i].name);
545 fprintf(gen->cuda.kernel_c, "%s *%s",
546 gen->options->type, gen->array[i].name);
547 fprintf(gen->cuda.kernel_h, "%s *%s",
548 gen->options->type, gen->array[i].name);
550 first = 0;
553 dim = isl_union_set_get_dim(arrays);
554 nparam = isl_dim_size(dim, isl_dim_param);
555 for (i = 0; i < nparam; ++i) {
556 const char *name = isl_dim_get_name(dim, isl_dim_param, i);
557 if (!first) {
558 fprintf(gen->code.dst, ", ");
559 fprintf(gen->cuda.kernel_c, ", ");
560 fprintf(gen->cuda.kernel_h, ", ");
562 fprintf(gen->code.dst, "%s", name);
563 fprintf(gen->cuda.kernel_c, "int %s", name);
564 fprintf(gen->cuda.kernel_h, "int %s", name);
565 first = 0;
567 isl_dim_free(dim);
569 for (i = 0; i < gen->tile_first; ++i) {
570 if (!first) {
571 fprintf(gen->code.dst, ", ");
572 fprintf(gen->cuda.kernel_c, ", ");
573 fprintf(gen->cuda.kernel_h, ", ");
575 fprintf(gen->code.dst, "h%d", i);
576 fprintf(gen->cuda.kernel_c, "int h%d", i);
577 fprintf(gen->cuda.kernel_h, "int h%d", i);
578 first = 0;
581 fprintf(gen->code.dst, ");\n");
582 fprintf(gen->cuda.kernel_c, ")\n");
583 fprintf(gen->cuda.kernel_h, ");\n");
586 /* Construct a map from a domain of dimensionality "len"
587 * to a domain of dimensionality "len" + "tile_len" that tiles
588 * the "tile_len" coordinates starting at "first".
589 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
590 * "dim" prescribes the parameters.
592 static __isl_give isl_map *tile(__isl_take isl_dim *dim, int len,
593 int first, int tile_len, int *tile_size)
595 int i;
596 isl_int v;
597 isl_basic_map *bmap;
598 isl_constraint *c;
600 isl_int_init(v);
602 dim = isl_dim_add(dim, isl_dim_in, len);
603 dim = isl_dim_add(dim, isl_dim_out, len + tile_len);
604 bmap = isl_basic_map_universe(isl_dim_copy(dim));
606 for (i = 0; i < len - tile_len; ++i) {
607 int j = i < first ? i : i + tile_len;
608 int k = i < first ? i : i + 2 * tile_len;
610 c = isl_equality_alloc(isl_dim_copy(dim));
611 isl_int_set_si(v, -1);
612 isl_constraint_set_coefficient(c, isl_dim_in, j, v);
613 isl_int_set_si(v, 1);
614 isl_constraint_set_coefficient(c, isl_dim_out, k, v);
615 bmap = isl_basic_map_add_constraint(bmap, c);
618 for (i = 0; i < tile_len; ++i) {
619 c = isl_equality_alloc(isl_dim_copy(dim));
620 isl_int_set_si(v, -1);
621 isl_constraint_set_coefficient(c, isl_dim_in, first + i, v);
622 isl_int_set_si(v, tile_size[i]);
623 isl_constraint_set_coefficient(c, isl_dim_out, first + i, v);
624 isl_int_set_si(v, 1);
625 isl_constraint_set_coefficient(c, isl_dim_out,
626 first + i + tile_len, v);
627 bmap = isl_basic_map_add_constraint(bmap, c);
629 c = isl_inequality_alloc(isl_dim_copy(dim));
630 isl_int_set_si(v, 1);
631 isl_constraint_set_coefficient(c, isl_dim_out,
632 first + i + tile_len, v);
633 bmap = isl_basic_map_add_constraint(bmap, c);
635 c = isl_inequality_alloc(isl_dim_copy(dim));
636 isl_int_set_si(v, -1);
637 isl_constraint_set_coefficient(c, isl_dim_out,
638 first + i + tile_len, v);
639 isl_int_set_si(v, tile_size[i] - 1);
640 isl_constraint_set_constant(c, v);
641 bmap = isl_basic_map_add_constraint(bmap, c);
644 isl_dim_free(dim);
645 isl_int_clear(v);
647 return isl_map_from_basic_map(bmap);
650 /* Construct a map from a domain of dimensionality "len"
651 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
652 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
653 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
654 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
655 * that are projected out at the end.
656 * "dim" prescribes the parameters.
658 static __isl_give isl_map *wrap(__isl_take isl_dim *dim, int len,
659 int first, int wrap_len, int *wrap_size)
661 int i;
662 isl_basic_map *bmap;
663 isl_constraint *c;
665 dim = isl_dim_add(dim, isl_dim_in, len);
666 dim = isl_dim_add(dim, isl_dim_out, len + 2 * wrap_len);
667 bmap = isl_basic_map_universe(isl_dim_copy(dim));
669 for (i = 0; i < len; ++i) {
670 int k = i < first + wrap_len ? i : i + 2 * wrap_len;
672 c = isl_equality_alloc(isl_dim_copy(dim));
673 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
674 isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
675 bmap = isl_basic_map_add_constraint(bmap, c);
678 for (i = 0; i < wrap_len; ++i) {
679 c = isl_equality_alloc(isl_dim_copy(dim));
680 isl_constraint_set_coefficient_si(c, isl_dim_out,
681 first + i, -1);
682 isl_constraint_set_coefficient_si(c, isl_dim_out,
683 first + wrap_len + i, 1);
684 isl_constraint_set_coefficient_si(c, isl_dim_out,
685 first + 2 * wrap_len + i, wrap_size[i]);
686 bmap = isl_basic_map_add_constraint(bmap, c);
688 c = isl_inequality_alloc(isl_dim_copy(dim));
689 isl_constraint_set_coefficient_si(c, isl_dim_out,
690 first + wrap_len + i, 1);
691 bmap = isl_basic_map_add_constraint(bmap, c);
693 c = isl_inequality_alloc(isl_dim_copy(dim));
694 isl_constraint_set_coefficient_si(c, isl_dim_out,
695 first + wrap_len + i, -1);
696 isl_constraint_set_constant_si(c, wrap_size[i] - 1);
697 bmap = isl_basic_map_add_constraint(bmap, c);
700 isl_dim_free(dim);
702 bmap = isl_basic_map_project_out(bmap, isl_dim_out,
703 first + 2 * wrap_len, wrap_len);
705 return isl_map_from_basic_map(bmap);
708 /* Add "n" parameters named prefix%d.
710 static __isl_give isl_set *add_params( __isl_take isl_set *set,
711 int n, const char *prefix)
713 int i;
714 unsigned nparam;
715 char name[20];
717 nparam = isl_set_dim(set, isl_dim_param);
718 set = isl_set_add_dims(set, isl_dim_param, n);
720 for (i = 0; i < n; ++i) {
721 snprintf(name, sizeof(name), "%s%d", prefix, i);
722 set = isl_set_set_dim_name(set, isl_dim_param,
723 nparam + i, name);
726 return set;
729 /* Equate the "n" dimensions of "set" starting at "first" to
730 * freshly created parameters named prefix%d.
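 *
 * For example, parametrize(set, 2, 3, "t") introduces parameters t0, t1
 * and t2 and equates them to set dimensions 2, 3 and 4.
 */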
732 static __isl_give isl_set *parametrize(__isl_take isl_set *set,
733 int first, int n, const char *prefix)
735 int i;
736 unsigned nparam;
737 isl_int v;
738 isl_dim *dim;
739 isl_basic_set *bset;
740 isl_constraint *c;
742 nparam = isl_set_dim(set, isl_dim_param);
744 set = add_params(set, n, prefix);
746 dim = isl_set_get_dim(set);
747 bset = isl_basic_set_universe(isl_dim_copy(dim));
749 isl_int_init(v);
751 for (i = 0; i < n; ++i) {
752 c = isl_equality_alloc(isl_dim_copy(dim));
753 isl_int_set_si(v, -1);
754 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
755 isl_int_set_si(v, 1);
756 isl_constraint_set_coefficient(c, isl_dim_set, first + i, v);
757 bset = isl_basic_set_add_constraint(bset, c);
760 isl_int_clear(v);
761 isl_dim_free(dim);
763 return isl_set_intersect(set, isl_set_from_basic_set(bset));
766 static __isl_give isl_set *parametrization(__isl_take isl_dim *dim,
767 int len, int first, int n, const char *prefix)
769 isl_set *set;
771 dim = isl_dim_add(dim, isl_dim_set, len);
772 set = isl_set_universe(dim);
774 return parametrize(set, first, n, prefix);
777 /* Tile the B loops over the tile sizes and then tile/wrap
778 * the T1 loops over the blocks.
780 static __isl_give isl_union_map *tile_schedule(struct cuda_gen *gen,
781 __isl_take isl_union_map *sched)
783 isl_dim *dim;
784 isl_map *tiling, *block_tiling;
786 dim = isl_union_map_get_dim(sched);
787 tiling = tile(isl_dim_copy(dim), gen->untiled_len,
788 gen->tile_first, gen->tile_len, gen->tile_size);
790 if (gen->options->wrap)
791 block_tiling = wrap(dim, gen->untiled_len + gen->tile_len,
792 gen->tile_first, gen->n_grid, gen->grid_dim);
793 else
794 block_tiling = tile(dim, gen->untiled_len + gen->tile_len,
795 gen->tile_first, gen->n_grid, gen->grid_dim);
797 gen->tiled_len = gen->untiled_len + gen->tile_len + gen->n_grid;
799 tiling = isl_map_apply_range(tiling, block_tiling);
801 sched = isl_union_map_apply_range(sched,
802 isl_union_map_from_map(tiling));
804 gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;
806 return sched;
809 static __isl_give isl_union_map *parametrize_tiled_schedule(
810 struct cuda_gen *gen, __isl_take isl_union_map *sched)
812 isl_dim *dim;
813 isl_set *par;
815 dim = isl_union_map_get_dim(sched);
816 par = parametrization(dim, gen->tiled_len, 0, gen->tile_first, "h");
817 sched = isl_union_map_intersect_range(sched,
818 isl_union_set_from_set(par));
820 dim = isl_union_map_get_dim(sched);
821 par = parametrization(dim, gen->tiled_len,
822 gen->tile_first + gen->n_grid, gen->n_grid, "b");
823 sched = isl_union_map_intersect_range(sched,
824 isl_union_set_from_set(par));
826 return sched;
829 /* Tile/wrap the P1 loops over the threads.
831 static __isl_give isl_union_map *thread_tile_schedule(struct cuda_gen *gen,
832 __isl_take isl_union_map *sched)
834 isl_dim *dim;
835 isl_map *tiling;
836 isl_set *par;
838 dim = isl_union_map_get_dim(sched);
840 if (gen->options->wrap)
841 tiling = wrap(isl_dim_copy(dim), gen->tiled_len,
842 gen->shared_len, gen->n_block, gen->block_dim);
843 else
844 tiling = tile(isl_dim_copy(dim), gen->tiled_len,
845 gen->shared_len, gen->n_block, gen->block_dim);
846 gen->thread_tiled_len = gen->tiled_len + gen->n_block;
848 sched = isl_union_map_apply_range(sched,
849 isl_union_map_from_map(tiling));
851 par = parametrization(dim, gen->thread_tiled_len,
852 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
853 gen->n_block, "t");
854 sched = isl_union_map_intersect_range(sched,
855 isl_union_set_from_set(par));
857 gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;
859 return sched;
862 /* If the user asked for it, scale the shared memory tile loops
863 * (T1P and T2) of "sched" by gen->tile_size[i].
864 * If we are not performing "wrapping", then additionally scale the T1P
865 * loops by gen->grid_dim[i].
867 static __isl_give isl_union_map *scale_tile_loops(struct cuda_gen *gen,
868 __isl_take isl_union_map *sched)
870 int i;
871 isl_dim *dim;
872 isl_basic_map *scale;
873 isl_constraint *c;
875 if (!gen->options->scale_tile_loops)
876 return sched;
878 dim = isl_union_map_get_dim(sched);
879 dim = isl_dim_add(dim, isl_dim_in, gen->tiled_len);
880 dim = isl_dim_add(dim, isl_dim_out, gen->tiled_len);
881 scale = isl_basic_map_universe(isl_dim_copy(dim));
883 for (i = 0; i < gen->tiled_len; ++i) {
884 int f = 1;
886 if (i >= gen->tile_first && i < gen->tile_first + gen->n_grid) {
887 f = gen->tile_size[i - gen->tile_first];
888 if (!gen->options->wrap)
889 f *= gen->grid_dim[i - gen->tile_first];
890 } else if (i >= gen->tile_first + gen->n_grid &&
891 i < gen->tile_first + gen->n_grid + gen->tile_len) {
892 f = gen->tile_size[i - (gen->tile_first + gen->n_grid)];
895 c = isl_equality_alloc(isl_dim_copy(dim));
896 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
897 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
898 scale = isl_basic_map_add_constraint(scale, c);
901 isl_dim_free(dim);
903 sched = isl_union_map_apply_range(sched,
904 isl_union_map_from_map(isl_map_from_basic_map(scale)));
906 return sched;
909 /* If we are not performing "wrapping" and if the user asked for it,
910 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
912 static __isl_give isl_union_map *scale_thread_tile_loops(struct cuda_gen *gen,
913 __isl_take isl_union_map *sched)
915 int i;
916 isl_dim *dim;
917 isl_basic_map *scale;
918 isl_constraint *c;
920 if (gen->options->wrap)
921 return sched;
922 if (!gen->options->scale_tile_loops)
923 return sched;
925 dim = isl_union_map_get_dim(sched);
926 dim = isl_dim_add(dim, isl_dim_in, gen->thread_tiled_len);
927 dim = isl_dim_add(dim, isl_dim_out, gen->thread_tiled_len);
928 scale = isl_basic_map_universe(isl_dim_copy(dim));
930 for (i = 0; i < gen->thread_tiled_len; ++i) {
931 int f = 1;
933 if (i >= gen->shared_len &&
934 i < gen->shared_len + gen->n_block)
935 f = gen->block_dim[i - gen->shared_len];
937 c = isl_equality_alloc(isl_dim_copy(dim));
938 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
939 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
940 scale = isl_basic_map_add_constraint(scale, c);
943 isl_dim_free(dim);
945 sched = isl_union_map_apply_range(sched,
946 isl_union_map_from_map(isl_map_from_basic_map(scale)));
948 return sched;
951 /* If we are not performing "wrapping" and if the user asked for it,
952 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
954 static __isl_give isl_union_map *scale_access_tile_loops(struct cuda_gen *gen,
955 __isl_take isl_union_map *sched, int len, int first, int n_tile)
957 int i;
958 isl_dim *dim;
959 isl_basic_map *scale;
960 isl_constraint *c;
962 if (gen->options->wrap)
963 return sched;
964 if (!gen->options->scale_tile_loops)
965 return sched;
967 dim = isl_union_map_get_dim(sched);
968 dim = isl_dim_add(dim, isl_dim_in, len);
969 dim = isl_dim_add(dim, isl_dim_out, len);
970 scale = isl_basic_map_universe(isl_dim_copy(dim));
972 for (i = 0; i < len; ++i) {
973 int f = 1;
975 if (i >= first && i < first + n_tile)
976 f = gen->block_dim[i - first];
978 c = isl_equality_alloc(isl_dim_copy(dim));
979 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
980 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
981 scale = isl_basic_map_add_constraint(scale, c);
984 isl_dim_free(dim);
986 sched = isl_union_map_apply_range(sched,
987 isl_union_map_from_map(isl_map_from_basic_map(scale)));
989 return sched;
992 /* If print_user_stmt is set, we want to print the statements ourselves,
993 * instead of relying on the C preprocessor. If so, we need to use
994 * the stop option so that the domains will be saved on the statement
995 * nodes.
997 static void print_cloog_shared_body(struct cuda_gen *gen,
998 __isl_keep isl_set *context, __isl_keep isl_union_map *sched, int len,
999 void (*print_user_stmt)(struct gpucode_info *info,
1000 struct clast_user_stmt *s),
1001 int first_unroll)
1003 int i;
1004 CloogOptions *options;
1005 CloogDomain *cloog_context;
1006 CloogUnionDomain *ud;
1007 CloogInput *input;
1008 struct clast_stmt *stmt;
1009 char name[20];
1011 sched = isl_union_map_copy(sched);
1012 sched = isl_union_map_align_params(sched, isl_set_get_dim(context));
1014 options = cloog_options_malloc(gen->state);
1015 options->language = LANGUAGE_C;
1016 options->strides = 1;
1017 options->sh = 1;
1018 options->f = len;
1019 options->l = -1;
1020 options->override = 1;
1021 options->save_domains = 1;
1022 options->noscalars = 1;
1023 options->first_unroll = first_unroll;
1025 ud = cloog_union_domain_from_isl_union_map(sched);
1026 for (i = 0; i < len; ++i) {
1027 snprintf(name, sizeof(name), "c%d", i);
1028 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
1030 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
1031 input = cloog_input_alloc(cloog_context, ud);
1033 stmt = cloog_clast_create_from_input(input, options);
1035 gen->stmt_code.indent = gen->kernel_code.indent;
1036 gen->stmt_code.dst = gen->cuda.kernel_c;
1037 gen->stmt_code.print_user_stmt = print_user_stmt;
1038 gen->stmt_code.print_user_stmt_list = NULL;
1039 gen->stmt_code.print_for_head = NULL;
1040 gen->stmt_code.print_for_foot = NULL;
1041 gen->stmt_code.user = gen;
1042 gpu_print_host_stmt(&gen->stmt_code, stmt);
1044 cloog_clast_free(stmt);
1045 cloog_options_free(options);
1048 /* Add "len" parameters p[i] called prefix%d,
1049 * with bounds 0 <= p[i] < size[i].
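 *
 * For example (illustrative sizes), add_bounded_parameters(set, 2, size, "t")
 * with size = {32, 16} adds parameters t0 and t1 together with the
 * constraints 0 <= t0 <= 31 and 0 <= t1 <= 15.
 */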
1051 __isl_give isl_set *add_bounded_parameters(__isl_take isl_set *set,
1052 int len, int *size, const char *prefix)
1054 int i;
1055 unsigned nparam;
1056 isl_int v;
1057 isl_dim *dim;
1058 isl_basic_set *bset;
1059 isl_constraint *c;
1060 char name[20];
1062 nparam = isl_set_dim(set, isl_dim_param);
1063 set = isl_set_add_dims(set, isl_dim_param, len);
1065 for (i = 0; i < len; ++i) {
1066 snprintf(name, sizeof(name), "%s%d", prefix, i);
1067 set = isl_set_set_dim_name(set, isl_dim_param,
1068 nparam + i, name);
1071 dim = isl_set_get_dim(set);
1072 bset = isl_basic_set_universe(isl_dim_copy(dim));
1074 isl_int_init(v);
1076 for (i = 0; i < len; ++i) {
1077 c = isl_inequality_alloc(isl_dim_copy(dim));
1078 isl_int_set_si(v, 1);
1079 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
1080 bset = isl_basic_set_add_constraint(bset, c);
1082 c = isl_inequality_alloc(isl_dim_copy(dim));
1083 isl_int_set_si(v, -1);
1084 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
1085 isl_int_set_si(v, size[i] - 1);
1086 isl_constraint_set_constant(c, v);
1087 bset = isl_basic_set_add_constraint(bset, c);
1090 isl_int_clear(v);
1091 isl_dim_free(dim);
1093 return isl_set_intersect(set, isl_set_from_basic_set(bset));
1096 static void print_shared_body(struct cuda_gen *gen,
1097 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched,
1098 int len, void (*print_user_stmt)(struct gpucode_info *info,
1099 struct clast_user_stmt *s),
1100 int first_unroll)
1102 isl_set *context;
1104 context = isl_set_copy(shared_domain);
1105 context = parametrize(context, 0, gen->shared_len, "g");
1106 context = isl_set_project_out(context, isl_dim_set, 0, gen->shared_len);
1107 context = add_bounded_parameters(context,
1108 gen->n_block, gen->block_dim, "t");
1110 print_cloog_shared_body(gen, context, sched, len, print_user_stmt,
1111 first_unroll);
1113 isl_set_free(context);
1116 /* Given a tile of an array, construct a map that maps each element
1117 * of the tile to a copy of the tile shifted to the origin
1118 * (based on the lower bounds in group->private_bound or group->shared_bound).
1119 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
1120 * is applied to the index first.
1121 * The domain of the resulting map is "access",
1122 * while the range space is anonymous.
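 *
 * As an illustration (hypothetical values), in the absence of strides,
 * if a two-dimensional tile has lower bounds (16, 32) at the current
 * iteration, then element (18, 35) of the tile is mapped to (2, 3).
 */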
1124 static __isl_give isl_map *shift_access(__isl_take isl_set *access,
1125 struct cuda_array_ref_group *group)
1127 int i;
1128 isl_dim *dim;
1129 isl_basic_set *bset;
1130 isl_basic_map *bmap;
1131 isl_aff *lb;
1132 isl_basic_set *offset;
1133 isl_basic_map *shift;
1134 isl_basic_map *pre_shift;
1135 isl_map *sched;
1136 const char *name;
1137 struct cuda_array_bound *bounds;
1138 int n_index = group->array->n_index;
1140 bounds = group->private_bound;
1141 if (!bounds)
1142 bounds = group->shared_bound;
1144 dim = isl_set_get_dim(access);
1145 dim = isl_dim_drop(dim, isl_dim_set, 0, n_index);
1146 offset = isl_basic_set_universe(dim);
1147 for (i = 0; i < n_index; ++i) {
1148 lb = isl_aff_copy(bounds[i].lb);
1149 bmap = isl_basic_map_from_aff(lb);
1150 bset = isl_basic_map_range(bmap);
1151 offset = isl_basic_set_flat_product(offset, bset);
1153 offset = isl_basic_set_neg(offset);
1155 dim = isl_dim_map_from_set(isl_set_get_dim(access));
1156 shift = isl_basic_map_identity(dim);
1157 shift = isl_basic_map_set_tuple_name(shift, isl_dim_out, NULL);
1159 bset = isl_basic_set_universe(isl_set_get_dim(access));
1160 bmap = isl_basic_map_from_domain_and_range(bset, offset);
1162 shift = isl_basic_map_sum(shift, bmap);
1164 dim = isl_set_get_dim(access);
1165 dim = isl_dim_drop(dim, isl_dim_set, 0, n_index);
1166 dim = isl_dim_map_from_set(dim);
1167 pre_shift = isl_basic_map_universe(isl_dim_copy(dim));
1168 dim = isl_dim_add(dim, isl_dim_in, 1);
1169 dim = isl_dim_add(dim, isl_dim_out, 1);
1170 for (i = 0; i < n_index; ++i) {
1171 if (!bounds[i].shift_map)
1172 bmap = isl_basic_map_identity(isl_dim_copy(dim));
1173 else
1174 bmap = isl_basic_map_copy(bounds[i].shift_map);
1175 pre_shift = isl_basic_map_flat_product(pre_shift, bmap);
1177 isl_dim_free(dim);
1178 name = isl_basic_map_get_tuple_name(shift, isl_dim_in);
1179 pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_in, name);
1180 pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_out, name);
1181 shift = isl_basic_map_apply_range(pre_shift, shift);
1183 sched = isl_map_from_basic_map(shift);
1184 sched = isl_map_intersect_domain(sched, access);
1186 return sched;
1189 /* Construct a schedule for iterating over all elements in the given
1190 * piece of an array. The schedule iterates over a copy of the piece
1191 * that is shifted to the origin.
1192 * We subsequently also perform the tiling/wrapping over the threads.
1194 * In particular, we tile the final iterators so that the final thread
1195 * dimension runs over the final array dimension.
1196 * However, if those final iterators have only a single iteration,
1197 * we try to tile earlier iterators instead.
1199 static __isl_give isl_union_map *access_schedule(struct cuda_gen *gen,
1200 __isl_take isl_set *access, struct cuda_array_ref_group *group)
1202 isl_dim *dim;
1203 isl_map *sched;
1204 isl_union_map *usched;
1205 isl_map *tiling;
1206 isl_set *par;
1207 unsigned nvar = isl_set_dim(access, isl_dim_set);
1208 int n_tile;
1209 int first;
1211 sched = shift_access(access, group);
1213 n_tile = gen->n_block;
1214 if (n_tile > nvar) {
1215 int i;
1216 sched = isl_map_insert(sched, isl_dim_out, 0, n_tile - nvar);
1217 for (i = 0; i < n_tile - nvar; ++i)
1218 sched = isl_map_fix_si(sched, isl_dim_out, i, 0);
1219 nvar = n_tile;
1222 first = nvar - n_tile;
1224 for (; first > 0; first --)
1225 if (!isl_map_plain_is_fixed(sched, isl_dim_out,
1226 first + n_tile - 1, NULL))
1227 break;
1229 dim = isl_map_get_dim(sched);
1230 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
1231 dim = isl_dim_drop(dim, isl_dim_out, 0, nvar);
1232 if (gen->options->wrap)
1233 tiling = wrap(isl_dim_copy(dim), nvar, first,
1234 n_tile, gen->block_dim);
1235 else
1236 tiling = tile(isl_dim_copy(dim), nvar, first,
1237 n_tile, gen->block_dim);
1238 sched = isl_map_apply_range(sched, tiling);
1240 par = parametrization(dim, nvar + n_tile, first + n_tile, n_tile, "t");
1241 usched = isl_union_map_from_map(sched);
1242 usched = isl_union_map_intersect_range(usched,
1243 isl_union_set_from_set(par));
1245 usched = scale_access_tile_loops(gen, usched, nvar + n_tile,
1246 first, n_tile);
1248 return usched;
1251 static void print_shared_access(struct cuda_gen *gen,
1252 __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
1253 const char *type, struct cuda_array_ref_group *group)
1255 const char *array_name;
1256 char *name;
1257 isl_ctx *ctx;
1258 isl_union_map *sched;
1259 unsigned nvar = isl_set_dim(access, isl_dim_set);
1260 int n_tile;
1262 ctx = isl_set_get_ctx(access);
1263 array_name = isl_set_get_tuple_name(access);
1264 name = isl_alloc_array(ctx, char,
1265 strlen(type) + sizeof("_shared_") + strlen(array_name) + 20);
1266 if (group->array->n_group > 1)
1267 sprintf(name, "%s_shared_%s_%d", type, array_name, group->nr);
1268 else
1269 sprintf(name, "%s_shared_%s", type, array_name);
1270 access = isl_set_set_tuple_name(access, name);
1271 free(name);
1273 sched = access_schedule(gen, access, group);
1275 n_tile = gen->n_block;
1276 if (n_tile > nvar)
1277 n_tile = nvar;
1279 print_shared_body(gen, shared_domain, sched, nvar + n_tile, NULL, -1);
1281 isl_union_map_free(sched);
1284 /* Return the union of all read (read = 1) and/or write (write = 1)
1285 * access relations in the group.
1287 static __isl_give isl_union_map *group_access_relation(
1288 struct cuda_array_ref_group *group, int read, int write)
1290 int i;
1291 isl_union_map *access;
1293 access = isl_union_map_empty(isl_map_get_dim(group->access));
1294 for (i = 0; i < group->n_ref; ++i) {
1295 isl_map *map_i;
1297 if (!((read && group->refs[i]->read) ||
1298 (write && group->refs[i]->write)))
1299 continue;
1300 map_i = isl_map_copy(group->refs[i]->access);
1301 access = isl_union_map_union(access,
1302 isl_union_map_from_map(map_i));
1305 return access;
1308 /* Check that none of the shared memory tiles involve any strides.
1310 static int no_strides(struct cuda_array_ref_group *group)
1312 int i;
1313 int n_index = group->array->n_index;
1315 for (i = 0; i < n_index; ++i)
1316 if (group->shared_bound[i].shift)
1317 return 0;
1319 return 1;
1322 /* Return a set containing the values of the given index i
1323 * of the elements in the array tile in global memory that corresponds
1324 * to the shared memory copy.
1325 * In particular, if a is the index, we return a set with constraints
1327 * tile_offset <= a <= tile_offset + tile_size - 1
1329 * and
1331 * 0 <= a <= array_size - 1
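 *
 * For instance (illustrative values), with tile_offset = 32 * b0 and
 * tile_size = 32, the result is 32 * b0 <= a <= 32 * b0 + 31,
 * intersected with 0 <= a <= array_size - 1.
 */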
1334 static __isl_give isl_set *group_tile_dim(struct cuda_array_ref_group *group,
1335 int i)
1337 isl_basic_set *tile;
1338 isl_aff *aff;
1339 isl_constraint *c;
1340 isl_local_space *ls;
1341 isl_pw_aff *bound;
1342 isl_set *dom;
1343 isl_set *tile_set;
1345 aff = isl_aff_copy(group->shared_bound[i].lb);
1346 aff = isl_aff_add_dims(aff, isl_dim_set, 1);
1347 ls = isl_aff_get_local_space(aff);
1348 aff = isl_aff_neg(aff);
1349 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, 0, 1);
1350 c = isl_inequality_from_aff(isl_aff_copy(aff));
1351 tile = isl_basic_set_from_constraint(c);
1353 aff = isl_aff_neg(aff);
1354 aff = isl_aff_add_constant(aff, group->shared_bound[i].size);
1355 aff = isl_aff_add_constant_si(aff, -1);
1356 c = isl_inequality_from_aff(aff);
1357 tile = isl_basic_set_add_constraint(tile, c);
1359 aff = isl_aff_zero(ls);
1360 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, 0, 1);
1361 c = isl_inequality_from_aff(aff);
1362 tile = isl_basic_set_add_constraint(tile, c);
1364 bound = isl_pw_aff_copy(group->array->bound[i]);
1365 bound = isl_pw_aff_add_dims(bound, isl_dim_set, 1);
1366 ls = isl_local_space_from_dim(isl_pw_aff_get_dim(bound));
1367 aff = isl_aff_zero(ls);
1368 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, 0, 1);
1369 aff = isl_aff_add_constant_si(aff, 1);
1370 dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
1372 tile_set = isl_pw_aff_ge_set(bound, isl_pw_aff_alloc(dom, aff));
1373 tile_set = isl_set_align_params(tile_set, isl_basic_set_get_dim(tile));
1374 tile_set = isl_set_intersect(tile_set, isl_set_from_basic_set(tile));
1376 return tile_set;
1379 /* Return a set containing the elements in the array tile in
1380 * global memory that corresponds to the shared memory copy.
1382 static __isl_give isl_set *group_tile(struct cuda_array_ref_group *group)
1384 int i;
1385 int n_index = group->array->n_index;
1386 isl_set *tile;
1388 tile = group_tile_dim(group, 0);
1389 for (i = 1; i < n_index; ++i) {
1390 isl_set *tile_i;
1392 tile_i = group_tile_dim(group, i);
1393 tile = isl_set_flat_product(tile, tile_i);
1396 tile = isl_set_set_tuple_name(tile, group->array->name);
1398 return tile;
1401 /* Print code for reading into or writing from shared memory
1402 * the given array reference group.
1404 * sched maps the original iteration domains to the shared memory tile loops.
1406 * If we are performing a read from global memory to shared memory,
1407 * if the array involved is not a scalar and if the definition of the
1408 * shared memory tiles does not involve any strides, then we copy
1409 * the entire tile to shared memory. This may result in some extra
1410 * elements getting copied, but it should lead to simpler code
1411 * (which means that fewer registers may be needed) and less divergence.
1413 * Otherwise, we only copy the elements that will be read or have been written
1414 * in the kernel.
1416 * Note that the requirement that there be no strides can easily be lifted.
1417 * We would just need to add constraints of the form
1419 * shift + a = stride * alpha
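 *
 * For example (illustrative values), with shift = 1 and stride = 2 such a
 * constraint would restrict the copy to the elements with odd index a,
 * since 1 + a = 2 * alpha.
 */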
1421 static int print_group_shared_accesses(struct cuda_gen *gen,
1422 struct cuda_array_ref_group *group, const char *type,
1423 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched)
1425 int read;
1426 isl_union_map *access;
1427 isl_union_set *uset;
1428 isl_set *access_set;
1430 if (group->private_bound)
1431 return 0;
1432 if (!group->shared_bound)
1433 return 0;
1435 read = !strcmp(type, "read");
1437 access = group_access_relation(group, read, !read);
1438 access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
1439 uset = isl_union_map_range(access);
1441 if (isl_union_set_is_empty(uset)) {
1442 isl_union_set_free(uset);
1443 return 0;
1446 if (read && group->array->n_index > 0 && no_strides(group)) {
1447 isl_union_set_free(uset);
1448 access_set = group_tile(group);
1449 print_shared_access(gen, shared_domain, access_set,
1450 type, group);
1451 return 1;
1454 access_set = isl_set_from_union_set(uset);
1455 access_set = isl_set_coalesce(access_set);
1457 print_shared_access(gen, shared_domain, access_set, type, group);
1459 return 1;
1462 /* Print code for reading into or writing from shared memory at
1463 * the given level (-1 for innermost).
1465 * If we are not printing at the innermost level, then the dimensionality
1466 * of shared_domain may be smaller than gen->shared_len.
1467 * As the rest of the code assumes that the domain of access has
1468 * gen->shared_len dimensions, we therefore may need to embed this domain
1469 * in a higher dimensional space after intersection with shared_domain.
1471 static void print_shared_accesses(struct cuda_gen *gen,
1472 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
1473 const char *type, int level)
1475 int i, j;
1476 isl_dim *dim;
1477 isl_map *proj;
1478 isl_set *par;
1479 int shared_len = isl_set_dim(shared_domain, isl_dim_set);
1480 int sync = 0;
1481 isl_union_map *sched;
1483 shared_domain = isl_set_copy(shared_domain);
1484 sched = isl_union_map_copy(gen->tiled_sched);
1485 dim = isl_union_map_get_dim(sched);
1486 proj = projection(dim, gen->tiled_len, shared_len);
1487 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
1488 sched = isl_union_map_intersect_range(sched,
1489 isl_union_set_from_set(isl_set_copy(shared_domain)));
1490 if (shared_len != gen->shared_len) {
1491 dim = isl_union_map_get_dim(sched);
1492 proj = projection(dim, gen->shared_len, shared_len);
1493 proj = isl_map_reverse(proj);
1494 shared_domain = isl_set_apply(shared_domain,
1495 isl_map_copy(proj));
1496 sched = isl_union_map_apply_range(sched,
1497 isl_union_map_from_map(proj));
1500 dim = isl_union_map_get_dim(sched);
1501 par = parametrization(dim, gen->shared_len, 0, gen->shared_len, "g");
1502 sched = isl_union_map_intersect_range(sched,
1503 isl_union_set_from_set(par));
1505 for (i = 0; i < gen->n_array; ++i) {
1506 struct cuda_array_info *array = &gen->array[i];
1508 if (gen->array[i].print_shared_level != level)
1509 continue;
1511 for (j = 0; j < array->n_group; ++j) {
1512 if (print_group_shared_accesses(gen, array->groups[j],
1513 type, shared_domain, sched))
1514 sync = 1;
1518 isl_union_map_free(sched);
1519 isl_set_free(shared_domain);
1521 if (sync) {
1522 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
1523 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
1527 /* Given an index expression into a tile of an array, adjust the expression
1528 * to refer to the copy of the tile that has been shifted to the origin
1529 * (based on the lower bounds in array->shared_bound).
1530 * If the index is strided, then we first add
1531 * bound->shift and divide by bound->stride.
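 *
 * For example (illustrative values), with shift = 3 and stride = 4 an index
 * expression a first becomes (a + 3)/4, after which the lower bound is
 * subtracted.
 */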
1533 static __isl_give isl_qpolynomial *shift_index(__isl_take isl_qpolynomial *qp,
1534 struct cuda_array_info *array,
1535 struct cuda_array_bound *bound, __isl_take isl_set *domain)
1537 isl_qpolynomial *lb;
1539 if (bound->shift) {
1540 isl_qpolynomial *shift, *t;
1541 isl_int one;
1542 isl_dim *dim;
1543 shift = bound->shift;
1544 shift = isl_qpolynomial_copy(shift);
1545 shift = isl_qpolynomial_drop_dims(shift, isl_dim_set, 0,
1546 isl_qpolynomial_dim(shift, isl_dim_set));
1547 shift = isl_qpolynomial_align_params(shift,
1548 isl_qpolynomial_get_dim(qp));
1549 qp = isl_qpolynomial_add(qp, shift);
1550 dim = isl_qpolynomial_get_dim(qp);
1551 isl_int_init(one);
1552 isl_int_set_si(one, 1);
1553 t = isl_qpolynomial_rat_cst(dim, one, bound->stride);
1554 isl_int_clear(one);
1555 qp = isl_qpolynomial_mul(qp, t);
1558 lb = isl_qpolynomial_from_aff(isl_aff_copy(bound->lb));
1559 lb = isl_qpolynomial_drop_dims(lb, isl_dim_set, 0,
1560 isl_qpolynomial_dim(lb, isl_dim_set));
1562 lb = isl_qpolynomial_align_params(lb, isl_qpolynomial_get_dim(qp));
1564 qp = isl_qpolynomial_sub(qp, lb);
1565 qp = isl_qpolynomial_gist(qp, domain);
1567 return qp;
1570 /* This function is called for each access to an array in some statement
1571 * in the original code.
1572 * Replace that access by an access to shared or (linearized) global memory.
1573 * Since the array in shared memory is just
1574 * a shifted copy of part of the original array, we simply need
1575 * to subtract the lower bound, which was computed
1576 * in can_tile_for_shared_memory.
1577 * If any of the indices is strided, then we first add
1578 * shared_bound[i].shift and divide by shared_bound[i].stride.
1580 * If the given array is accessed directly from global memory,
1581 * we don't need to perform any shifting and simply simplify
1582 * the expression in the context of the domain instead.
1584 * If the array space (range of access) has no name, then we are
1585 * accessing an iterator in the original program.
1587 static void print_access(struct cuda_gen *gen, __isl_take isl_map *access,
1588 int group_nr)
1590 int i;
1591 const char *name;
1592 unsigned n_index;
1593 struct cuda_array_info *array = NULL;
1594 isl_printer *prn;
1595 isl_basic_set *aff;
1596 isl_set *data_set;
1597 isl_set *domain;
1598 struct cuda_array_bound *bounds = NULL;
1600 access = isl_map_align_params(access,
1601 isl_set_get_dim(gen->stmt_domain));
1603 data_set = isl_set_apply(isl_set_copy(gen->stmt_domain), access);
1605 name = isl_set_get_tuple_name(data_set);
1607 if (!name)
1608 fprintf(gen->cuda.kernel_c, "(");
1609 else {
1610 struct cuda_array_ref_group *group;
1612 for (i = 0; i < gen->n_array; ++i) {
1613 if (strcmp(name, gen->array[i].name))
1614 continue;
1615 array = &gen->array[i];
1617 assert(array);
1618 group = array->groups[group_nr];
1619 bounds = group->private_bound;
1620 if (!bounds)
1621 bounds = group->shared_bound;
1623 print_array_name(gen->cuda.kernel_c, group);
1624 fprintf(gen->cuda.kernel_c, "[");
1628 n_index = isl_set_dim(data_set, isl_dim_set);
1629 aff = isl_set_affine_hull(data_set);
1631 prn = isl_printer_to_file(gen->ctx, gen->cuda.kernel_c);
1632 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1634 if (!bounds)
1635 for (i = 0; i + 1 < n_index; ++i)
1636 prn = isl_printer_print_str(prn, "(");
1638 for (i = 0; i < n_index; ++i) {
1639 isl_constraint *c;
1640 isl_qpolynomial *qp;
1641 int ok;
1643 ok = isl_basic_set_has_defining_equality(aff,
1644 isl_dim_out, i, &c);
1645 assert(ok);
1646 qp = isl_qpolynomial_from_constraint(c, isl_dim_out, i);
1647 qp = isl_qpolynomial_drop_dims(qp, isl_dim_set, 0,
1648 isl_qpolynomial_dim(qp, isl_dim_set));
1650 if (!array) {
1651 prn = isl_printer_print_qpolynomial(prn, qp);
1652 isl_qpolynomial_free(qp);
1653 continue;
1656 domain = isl_set_copy(gen->stmt_domain);
1657 domain = isl_set_project_out(domain, isl_dim_set, 0,
1658 isl_set_dim(domain, isl_dim_set));
1659 if (!bounds)
1660 qp = isl_qpolynomial_gist(qp, domain);
1661 else
1662 qp = shift_index(qp, array, &bounds[i], domain);
1664 if (i) {
1665 if (!bounds) {
1666 prn = isl_printer_print_str(prn, ") * (");
1667 prn = isl_printer_print_pw_aff(prn,
1668 array->local_bound[i]);
1669 prn = isl_printer_print_str(prn, ") + ");
1670 } else
1671 prn = isl_printer_print_str(prn, "][");
1673 prn = isl_printer_print_qpolynomial(prn, qp);
1674 isl_qpolynomial_free(qp);
1676 if (!name)
1677 prn = isl_printer_print_str(prn, ")");
1678 else
1679 prn = isl_printer_print_str(prn, "]");
1680 isl_printer_free(prn);
1682 isl_basic_set_free(aff);
1685 static struct cuda_stmt_access *print_expr(struct cuda_gen *gen, FILE *out,
1686 struct pet_expr *expr, struct cuda_stmt_access *access, int outer)
1688 int i;
1690 switch (expr->type) {
1691 case pet_expr_double:
1692 fprintf(out, "%g", expr->d);
1693 break;
1694 case pet_expr_access:
1695 print_access(gen, isl_map_copy(access->access), access->group);
1696 access = access->next;
1697 break;
1698 case pet_expr_unary:
1699 if (!outer)
1700 fprintf(out, "(");
1701 fprintf(out, " %s ", pet_op_str(expr->op));
1702 access = print_expr(gen, out, expr->args[pet_un_arg],
1703 access, 0);
1704 if (!outer)
1705 fprintf(out, ")");
1706 break;
1707 case pet_expr_binary:
1708 if (!outer)
1709 fprintf(out, "(");
1710 access = print_expr(gen, out, expr->args[pet_bin_lhs],
1711 access, 0);
1712 fprintf(out, " %s ", pet_op_str(expr->op));
1713 access = print_expr(gen, out, expr->args[pet_bin_rhs],
1714 access, 0);
1715 if (!outer)
1716 fprintf(out, ")");
1717 break;
1718 case pet_expr_ternary:
1719 if (!outer)
1720 fprintf(out, "(");
1721 access = print_expr(gen, out, expr->args[pet_ter_cond],
1722 access, 0);
1723 fprintf(out, " ? ");
1724 access = print_expr(gen, out, expr->args[pet_ter_true],
1725 access, 0);
1726 fprintf(out, " : ");
1727 access = print_expr(gen, out, expr->args[pet_ter_false],
1728 access, 0);
1729 if (!outer)
1730 fprintf(out, ")");
1731 break;
1732 case pet_expr_call:
1733 fprintf(out, "%s(", expr->name);
1734 for (i = 0; i < expr->n_arg; ++i) {
1735 if (i)
1736 fprintf(out, ", ");
1737 access = print_expr(gen, out, expr->args[i],
1738 access, 1);
1740 fprintf(out, ")");
1742 return access;
1745 static void print_stmt_body(struct cuda_gen *gen,
1746 FILE *out, struct cuda_stmt *stmt)
1748 print_expr(gen, out, stmt->body, stmt->accesses, 1);
1749 fprintf(out, ";\n");
1752 /* This function is called for each leaf in the innermost clast,
1753 * i.e., for each statement.
1754 * We print the statement body, simplifying the accesses based
1755 * on the schedule.
1757 static void print_statement(struct gpucode_info *code,
1758 struct clast_user_stmt *u)
1760 struct cuda_gen *gen = code->user;
1761 isl_dim *dim;
1762 isl_set *par;
1763 isl_set *stmt_domain;
1764 isl_union_map *stmt_sched;
1765 isl_union_set *uset;
1766 int nr;
1767 struct cuda_stmt *stmt;
1769 nr = atoi(u->statement->name + 2);
1770 stmt = &gen->stmts[nr];
1772 stmt_domain = extract_host_domain(u);
1774 stmt_sched = isl_union_map_intersect_range(
1775 isl_union_map_copy(gen->local_sched),
1776 isl_union_set_from_set(extend(stmt_domain,
1777 gen->thread_tiled_len)));
1778 dim = isl_union_map_get_dim(stmt_sched);
1779 par = parametrization(dim, gen->thread_tiled_len, 0,
1780 gen->thread_tiled_len, "c");
1781 stmt_sched = isl_union_map_intersect_range(stmt_sched,
1782 isl_union_set_from_set(par));
1784 uset = isl_union_map_domain(stmt_sched);
1785 dim = isl_union_set_get_dim(uset);
1786 dim = isl_dim_add(dim, isl_dim_set,
1787 isl_set_dim(stmt->domain, isl_dim_set));
1788 dim = isl_dim_set_tuple_name(dim, isl_dim_set, u->statement->name);
1789 gen->stmt_domain = isl_union_set_extract_set(uset, dim);
1790 isl_union_set_free(uset);
1792 print_indent(code->dst, code->indent);
1793 print_stmt_body(gen, code->dst, stmt);
1795 isl_set_free(gen->stmt_domain);
1798 /* Print an access to the element in the global memory copy of the
1799 * given array that corresponds to element [qp[0]][qp[1]]...
1800 * of the original array.
1801 * The copy in global memory has been linearized, so we need to take
1802 * the array size into account.
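 *
 * For example (illustrative names), for a two-dimensional array A whose
 * inner dimension has bound M, element [i][j] of the original array is
 * printed as A[(i) * (M) + j].
 */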
1804 static void print_private_global_index(isl_ctx *ctx, FILE *out,
1805 struct cuda_array_info *array, __isl_keep isl_qpolynomial **qp)
1807 int i;
1808 isl_printer *prn;
1810 fprintf(out, "%s[", array->name);
1811 prn = isl_printer_to_file(ctx, out);
1812 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1813 for (i = 0; i + 1 < array->n_index; ++i)
1814 prn = isl_printer_print_str(prn, "(");
1815 for (i = 0; i < array->n_index; ++i) {
1816 if (i) {
1817 prn = isl_printer_print_str(prn, ") * (");
1818 prn = isl_printer_print_pw_aff(prn,
1819 array->local_bound[i]);
1820 prn = isl_printer_print_str(prn, ") + ");
1822 prn = isl_printer_print_qpolynomial(prn, qp[i]);
1824 isl_printer_free(prn);
1825 fprintf(out, "]");
1828 /* Print an access to the element in the private memory copy of the
1829 * given array reference group that corresponds to element [qps[0]][qps[1]]...
1830 * of the original array.
1831 * Since the array in private memory is just a shifted copy of part
1832 * of the original array, we simply need to subtract the lower bound,
1833 * which was computed in can_tile_for_shared_memory.
1834 * If any of the indices is strided, then we first add
1835 * private_bound[i].shift and divide by private_bound[i].stride.
1837 static void print_private_local_index(isl_ctx *ctx, FILE *out,
1838 struct cuda_array_ref_group *group,
1839 __isl_keep isl_qpolynomial **qps, __isl_keep isl_set *domain)
1841 int i;
1842 isl_printer *prn;
1843 struct cuda_array_info *array = group->array;
1844 struct cuda_array_bound *bounds = group->private_bound;
1846 print_array_name(out, group);
1847 for (i = 0; i < array->n_index; ++i) {
1848 isl_qpolynomial *qp = isl_qpolynomial_copy(qps[i]);
1850 qp = shift_index(qp, array, &bounds[i], isl_set_copy(domain));
1852 fprintf(out, "[");
1853 prn = isl_printer_to_file(ctx, out);
1854 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1855 prn = isl_printer_print_qpolynomial(prn, qp);
1856 isl_printer_free(prn);
1857 fprintf(out, "]");
1858 isl_qpolynomial_free(qp);
1862 /* This function is called for each leaf in the clast of the code
1863 * for copying to or from private memory.
1864 * The statement name is read_private_<array> or write_private_<array>.
1866 * The schedule iterates over the array elements, so we can use
1867 * the domain of private_sched at the current scheduling position
1868 * as the index of the array.
1870 static void print_private_copy_statement(struct gpucode_info *code,
1871 struct clast_user_stmt *u)
1873 struct cuda_gen *gen = code->user;
1874 isl_set *domain;
1875 isl_map *sched;
1876 struct cuda_array_ref_group *group = gen->private_group;
1877 int i;
1878 unsigned n_in;
1879 unsigned n_out;
1880 isl_dim *dim;
1881 isl_set *param;
1882 isl_set *index;
1883 isl_basic_set *aff;
1884 isl_ctx *ctx;
1885 isl_qpolynomial **qp;
1886 int read;
1888 read = !strncmp(u->statement->name, "read", 4);
1890 domain = extract_host_domain(u);
1891 assert(domain);
1893 sched = isl_map_copy(gen->private_sched);
1894 sched = isl_map_reverse(sched);
1895 sched = isl_map_intersect_domain(sched, domain);
1896 n_in = isl_map_dim(sched, isl_dim_in);
1897 n_out = isl_map_dim(sched, isl_dim_out);
1898 dim = isl_map_get_dim(sched);
1899 dim = isl_dim_drop(dim, isl_dim_in, 0, n_in);
1900 dim = isl_dim_drop(dim, isl_dim_out, 0, n_out);
1901 param = parametrization(dim, n_in, 0, n_in, "c");
1902 sched = isl_map_align_params(sched, isl_set_get_dim(param));
1903 sched = isl_map_intersect_domain(sched, param);
1904 index = isl_map_range(sched);
1905 domain = isl_set_copy(index);
1906 aff = isl_set_affine_hull(index);
1907 domain = isl_set_project_out(domain, isl_dim_set, 0, n_out);
1909 ctx = isl_basic_set_get_ctx(aff);
1910 qp = isl_alloc_array(ctx, isl_qpolynomial *, n_out);
1911 assert(qp);
1913 for (i = 0; i < n_out; ++i) {
1914 isl_constraint *c;
1915 int ok;
1917 ok = isl_basic_set_has_defining_equality(aff,
1918 isl_dim_set, i, &c);
1919 assert(ok);
1920 qp[i] = isl_qpolynomial_from_constraint(c, isl_dim_set, i);
1921 qp[i] = isl_qpolynomial_drop_dims(qp[i], isl_dim_set, 0, n_out);
1924 print_indent(code->dst, code->indent);
1925 if (read) {
1926 print_private_local_index(ctx, code->dst, group, qp, domain);
1927 fprintf(code->dst, " = ");
1928 print_private_global_index(ctx, code->dst, group->array, qp);
1929 } else {
1930 print_private_global_index(ctx, code->dst, group->array, qp);
1931 fprintf(code->dst, " = ");
1932 print_private_local_index(ctx, code->dst, group, qp, domain);
1934 fprintf(code->dst, ";\n");
1936 for (i = 0; i < n_out; ++i)
1937 isl_qpolynomial_free(qp[i]);
1938 free(qp);
1940 isl_basic_set_free(aff);
1941 isl_set_free(domain);
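/* Print code for copying the elements in "access" of the given array
 * reference group between global memory and the registers of the thread.
 * The access set is given a name of the form read_private_<array> or
 * write_private_<array> (with the group number appended if the array has
 * more than one reference group), scheduled by shift_access and printed
 * through print_shared_body using print_private_copy_statement.
 */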
1944 static void print_private_access(struct cuda_gen *gen,
1945 __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
1946 const char *type, struct cuda_array_ref_group *group)
1948 const char *array_name;
1949 char *name;
1950 isl_ctx *ctx;
1951 unsigned nvar = isl_set_dim(access, isl_dim_set);
1952 isl_union_map *usched;
1954 if (isl_set_fast_is_empty(access)) {
1955 isl_set_free(access);
1956 return;
1959 ctx = isl_set_get_ctx(access);
1960 array_name = isl_set_get_tuple_name(access);
1961 name = isl_alloc_array(ctx, char,
1962 strlen(type) + sizeof("_private_") + strlen(array_name) + 20);
1963 if (group->array->n_group > 1)
1964 sprintf(name, "%s_private_%s_%d", type, array_name, group->nr);
1965 else
1966 sprintf(name, "%s_private_%s", type, array_name);
1967 access = isl_set_set_tuple_name(access, name);
1968 free(name);
1970 gen->private_sched = shift_access(access, group);
1971 gen->private_group = group;
1973 usched = isl_union_map_from_map(isl_map_copy(gen->private_sched));
1974 print_shared_body(gen, shared_domain, usched, nvar,
1975 &print_private_copy_statement, 1);
1976 isl_union_map_free(usched);
1978 isl_map_free(gen->private_sched);
1981 /* Print code for reading into or writing from private memory
1982 * the given array reference group.
1984 * sched maps the original iteration domains to the shared memory tile loops.
1986 static void print_group_private_accesses(struct cuda_gen *gen,
1987 struct cuda_array_ref_group *group,
1988 const char *type, __isl_keep isl_set *shared_domain,
1989 unsigned first_shared, int shared_len, __isl_keep isl_union_map *sched)
1991 int read;
1992 isl_union_map *access;
1993 isl_union_set *uset;
1994 isl_set *access_set;
1996 if (!group->private_bound)
1997 return;
1999 read = !strcmp(type, "read");
2001 access = group_access_relation(group, read, !read);
2002 access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
2003 access = isl_union_map_intersect(access,
2004 isl_union_map_copy(gen->private_access));
2005 uset = isl_union_map_range(access);
2007 if (isl_union_set_is_empty(uset)) {
2008 isl_union_set_free(uset);
2009 return;
2012 access_set = isl_set_from_union_set(uset);
2013 access_set = isl_set_coalesce(access_set);
2014 access_set = isl_set_eliminate(access_set, isl_dim_param,
2015 first_shared + shared_len,
2016 gen->shared_len - shared_len);
2018 print_private_access(gen, shared_domain, access_set, type, group);
2021 /* Print code for reading into or writing from private memory at
2022 * the given level (-1 for innermost).
2024 * If we are not printing at the innermost level, then the dimensionality
2025 * of shared_domain may be smaller than gen->shared_len.
2026 * As the rest of the code assumes that the domain of access has
2027 * gen->shared_len dimensions, we therefore may need to embed this domain
2028 * in a higher dimensional space after intersection with shared_domain.
2030 * This code is very similar to print_shared_accesses.
2031 * The main difference is that we need to take into account gen->private_access.
2033 static void print_private_accesses(struct cuda_gen *gen,
2034 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
2035 const char *type, int level)
2037 int i, j;
2038 isl_dim *dim;
2039 isl_map *proj;
2040 int shared_len = isl_set_dim(shared_domain, isl_dim_set);
2041 unsigned first_shared;
2042 isl_union_map *sched;
2044 shared_domain = isl_set_copy(shared_domain);
2045 sched = isl_union_map_copy(gen->tiled_sched);
2046 dim = isl_union_map_get_dim(sched);
2047 first_shared = isl_dim_size(dim, isl_dim_param);
2048 proj = projection(dim, gen->tiled_len, shared_len);
2049 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
2050 sched = isl_union_map_intersect_range(sched,
2051 isl_union_set_from_set(isl_set_copy(shared_domain)));
2052 if (shared_len != gen->shared_len) {
2053 dim = isl_union_map_get_dim(sched);
2054 proj = projection(dim, gen->shared_len, shared_len);
2055 proj = isl_map_reverse(proj);
2056 shared_domain = isl_set_apply(shared_domain,
2057 isl_map_copy(proj));
2058 sched = isl_union_map_apply_range(sched,
2059 isl_union_map_from_map(proj));
2062 for (i = 0; i < gen->n_array; ++i) {
2063 struct cuda_array_info *array = &gen->array[i];
2065 if (gen->array[i].print_shared_level != level)
2066 continue;
2068 for (j = 0; j < array->n_group; ++j)
2069 print_group_private_accesses(gen, array->groups[j],
2070 type, shared_domain,
2071 first_shared, shared_len, sched);
2074 isl_union_map_free(sched);
2075 isl_set_free(shared_domain);
2078 /* Set unroll[j] if the input dimension j is involved in
2079 * the index expression represented by bmap.
2081 static int check_unroll(__isl_take isl_basic_map *bmap, void *user)
2083 int i, j;
2084 int n_in = isl_basic_map_dim(bmap, isl_dim_in);
2085 int n_out = isl_basic_map_dim(bmap, isl_dim_out);
2086 int *unroll = user;
2088 for (i = 0; i < n_out; ++i) {
2089 isl_constraint *c;
2090 int ok;
2092 ok = isl_basic_map_has_defining_equality(bmap,
2093 isl_dim_out, i, &c);
2094 assert(ok);
2095 for (j = 0; j < n_in; ++j)
2096 if (isl_constraint_involves_dims(c, isl_dim_in, j, 1))
2097 unroll[j] = 1;
2098 isl_constraint_free(c);
2101 isl_basic_map_free(bmap);
2102 return 0;
2105 /* Given an array pos mapping input dimensions to the corresponding
2106 * output dimension, construct the corresponding map.
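 * For example, if len = 3 and pos = { 1, 2, 0 }, then the result is
 * the map { [i0, i1, i2] -> [i2, i0, i1] }.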
2108 static __isl_give isl_map *permutation(__isl_take isl_dim *dim,
2109 int *pos, int len)
2111 int i;
2112 isl_constraint *c;
2113 isl_basic_map *bmap;
2115 dim = isl_dim_add(dim, isl_dim_in, len);
2116 dim = isl_dim_add(dim, isl_dim_out, len);
2117 bmap = isl_basic_map_universe(isl_dim_copy(dim));
2119 for (i = 0; i < len; ++i) {
2120 c = isl_equality_alloc(isl_dim_copy(dim));
2121 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
2122 isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i], 1);
2123 bmap = isl_basic_map_add_constraint(bmap, c);
2125 isl_dim_free(dim);
2127 return isl_map_from_basic_map(bmap);
2130 /* Find all loops involved in any of the index expressions for any of
2131 * the private accesses, move them innermost and then mark them as
2132 * requiring unrolling by setting gen->first_unroll.
2133 * The loops involved should all be parallel because of the checks
2134 * we performed in check_private_group_access. Moving them innermost
2135 * is therefore a valid transformation.
2137 static __isl_give isl_union_map *interchange_for_unroll(struct cuda_gen *gen,
2138 __isl_take isl_union_map *sched)
2140 int i, j;
2141 int unroll[gen->thread_tiled_len];
2142 int perm[gen->thread_tiled_len];
2143 isl_dim *dim;
2144 isl_map *permute;
2145 int len = gen->shared_len + gen->n_parallel + gen->n_block;
2147 gen->first_unroll = -1;
2149 for (i = 0; i < gen->thread_tiled_len; ++i)
2150 unroll[i] = 0;
2151 for (i = 0; i < gen->n_array; ++i) {
2152 struct cuda_array_info *array = &gen->array[i];
2154 for (j = 0; j < array->n_group; ++j) {
2155 isl_union_map *access;
2156 isl_map *acc;
2158 if (!array->groups[j]->private_bound)
2159 continue;
2161 access = group_access_relation(array->groups[j], 1, 1);
2162 access = isl_union_map_apply_domain(access,
2163 isl_union_map_copy(sched));
2165 acc = isl_map_from_union_map(access);
2166 isl_map_foreach_basic_map(acc, &check_unroll, unroll);
2168 isl_map_free(acc);
2172 for (i = 0; i < gen->shared_len; ++i)
2173 if (unroll[i])
2174 return sched;
2176 for (i = gen->shared_len; i < len; ++i)
2177 if (unroll[i])
2178 break;
2180 if (i >= len)
2181 return sched;
2183 for (i = len; i < gen->thread_tiled_len; ++i)
2184 if (unroll[i])
2185 return sched;
2187 j = 0;
2188 for (i = 0; i < gen->thread_tiled_len; ++i)
2189 if (!unroll[i])
2190 perm[i] = j++;
2191 gen->first_unroll = 1 + j;
2192 for (i = 0; i < len; ++i)
2193 if (unroll[i])
2194 perm[i] = j++;
2196 dim = isl_union_map_get_dim(sched);
2197 permute = permutation(dim, perm, gen->thread_tiled_len);
2198 sched = isl_union_map_apply_range(sched,
2199 isl_union_map_from_map(permute));
2201 return sched;
2204 /* This function is called for each leaf in the clast of the kernel code.
2205 * We first specialize the schedule to the site of the leaf and
2206 * print code for reading into shared memory, performing the actual
2207 * computations and writing from shared memory, with the required
2208 * synchronizations.
2210 static void print_kernel_user(struct gpucode_info *code,
2211 struct clast_user_stmt *u)
2213 struct cuda_gen *gen = code->user;
2214 isl_set *shared_domain;
2216 shared_domain = extract_entire_host_domain(u);
2218 print_shared_accesses(gen, shared_domain, gen->read, "read", -1);
2220 print_private_accesses(gen, shared_domain, gen->read, "read", -1);
2222 print_shared_body(gen, shared_domain, gen->local_sched,
2223 gen->thread_tiled_len, &print_statement,
2224 gen->first_unroll);
2226 print_private_accesses(gen, shared_domain, gen->write, "write", -1);
2228 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
2229 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
2231 print_shared_accesses(gen, shared_domain, gen->write, "write", -1);
2233 isl_set_free(shared_domain);
2236 /* Check if we need to perform any copying to shared memory at this level
2237 * and if so, print the copying instructions.
2238 * For any array for which we are allowed to print copying instructions at
2239 * this level, but haven't done so already, the copying is printed here.
2241 static void print_kernel_for_head(struct gpucode_info *code,
2242 struct clast_for *f)
2244 int i;
2245 struct cuda_gen *gen = code->user;
2246 isl_set *domain;
2247 int level;
2248 int print = 0;
2250 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2251 level = isl_set_dim(domain, isl_dim_set) - 1;
2253 for (i = 0; i < gen->n_array; ++i) {
2254 if (gen->array[i].print_shared_level >= 0)
2255 continue;
2256 if (gen->array[i].last_shared > level)
2257 continue;
2258 gen->array[i].print_shared_level = level;
2259 print = 1;
2262 if (print) {
2263 print_shared_accesses(gen, domain, gen->read, "read", level);
2264 print_private_accesses(gen, domain, gen->read, "read", level);
2267 isl_set_free(domain);
2270 /* Print instructions for copying from shared memory for each array
2271 * for which print_kernel_for_head has added copying instructions
2272 * to shared memory.
2274 static void print_kernel_for_foot(struct gpucode_info *code,
2275 struct clast_for *f)
2277 int i;
2278 struct cuda_gen *gen = code->user;
2279 isl_set *domain;
2280 int level;
2281 int print = 0;
2283 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2284 level = isl_set_dim(domain, isl_dim_set) - 1;
2286 for (i = 0; i < gen->n_array; ++i) {
2287 if (gen->array[i].print_shared_level != level)
2288 continue;
2289 print = 1;
2290 break;
2293 if (print) {
2294 print_private_accesses(gen, domain, gen->write, "write", level);
2295 print_shared_accesses(gen, domain, gen->write, "write", level);
2298 isl_set_free(domain);
2301 /* Use CLooG to generate code for the outer gen->shared_len loops
2302 * of the local schedule "sched".
2303 * The pretty printing of this code is handled by gpu_print_host_stmt,
2304 * which calls print_kernel_user for each iteration of the shared tile loops.
2306 static void print_cloog_kernel_body(struct cuda_gen *gen,
2307 __isl_keep isl_set *context, __isl_keep isl_union_map *sched)
2309 int i;
2310 CloogOptions *options;
2311 CloogDomain *cloog_context;
2312 CloogUnionDomain *ud;
2313 CloogInput *input;
2314 struct clast_stmt *stmt;
2315 char name[20];
2317 sched = isl_union_map_copy(sched);
2318 sched = isl_union_map_align_params(sched, isl_set_get_dim(context));
2320 options = cloog_options_malloc(gen->state);
2321 options->language = LANGUAGE_C;
2322 options->strides = 1;
2323 options->sh = 1;
2324 options->stop = gen->shared_len;
2325 options->f = gen->tiled_len;
2326 options->l = gen->tiled_len;
2327 options->save_domains = 1;
2328 options->noscalars = 1;
2330 ud = cloog_union_domain_from_isl_union_map(sched);
2331 for (i = 0; i < gen->shared_len; ++i) {
2332 snprintf(name, sizeof(name), "g%d", i);
2333 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
2335 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
2336 input = cloog_input_alloc(cloog_context, ud);
2338 stmt = cloog_clast_create_from_input(input, options);
2340 gen->kernel_code.indent = 4;
2341 gen->kernel_code.dst = gen->cuda.kernel_c;
2342 gen->kernel_code.print_user_stmt = NULL;
2343 gen->kernel_code.print_user_stmt_list = &print_kernel_user;
2344 gen->kernel_code.print_for_head = &print_kernel_for_head;
2345 gen->kernel_code.print_for_foot = &print_kernel_for_foot;
2346 gen->kernel_code.user = gen;
2347 gpu_print_host_stmt(&gen->kernel_code, stmt);
2349 cloog_clast_free(stmt);
2350 cloog_options_free(options);
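/* Print declarations of the block identifiers b0, b1, ... and the thread
 * identifiers t0, t1, ... used inside the kernel, initializing them
 * from blockIdx and threadIdx in reverse order.
 */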
2353 static void print_kernel_iterators(struct cuda_gen *gen)
2355 int i;
2356 const char *block_dims[] = { "blockIdx.x", "blockIdx.y" };
2357 const char *thread_dims[] = { "threadIdx.x", "threadIdx.y",
2358 "threadIdx.z" };
2360 if (gen->n_grid > 0) {
2361 print_indent(gen->cuda.kernel_c, 4);
2362 fprintf(gen->cuda.kernel_c, "int ");
2363 for (i = 0; i < gen->n_grid; ++i) {
2364 if (i)
2365 fprintf(gen->cuda.kernel_c, ", ");
2366 fprintf(gen->cuda.kernel_c, "b%d = %s",
2367 i, block_dims[gen->n_grid - 1 - i]);
2369 fprintf(gen->cuda.kernel_c, ";\n");
2372 if (gen->n_block > 0) {
2373 print_indent(gen->cuda.kernel_c, 4);
2374 fprintf(gen->cuda.kernel_c, "int ");
2375 for (i = 0; i < gen->n_block; ++i) {
2376 if (i)
2377 fprintf(gen->cuda.kernel_c, ", ");
2378 fprintf(gen->cuda.kernel_c, "t%d = %s",
2379 i, thread_dims[gen->n_block - 1 - i]);
2381 fprintf(gen->cuda.kernel_c, ";\n");
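/* Print the declaration of the shared memory or register array used for
 * the given array reference group, if any, with one dimension per array
 * index whose extent is the size stored in the corresponding
 * cuda_array_bound.  The declaration is marked __shared__ unless the
 * group is mapped to registers.
 */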
2385 static void print_group_shared_array(struct cuda_gen *gen,
2386 struct cuda_array_ref_group *group)
2388 int j;
2389 struct cuda_array_bound *bounds;
2391 bounds = group->private_bound;
2392 if (!bounds)
2393 bounds = group->shared_bound;
2394 if (!bounds)
2395 return;
2397 print_indent(gen->cuda.kernel_c, 4);
2398 fprintf(gen->cuda.kernel_c, "%s%s ",
2399 group->private_bound ? "" : "__shared__ ", gen->options->type);
2400 print_array_name(gen->cuda.kernel_c, group);
2401 for (j = 0; j < group->array->n_index; ++j) {
2402 fprintf(gen->cuda.kernel_c, "[");
2403 isl_int_print(gen->cuda.kernel_c, bounds[j].size, 0);
2404 fprintf(gen->cuda.kernel_c, "]");
2406 fprintf(gen->cuda.kernel_c, ";\n");
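/* Print the declarations of all shared memory and register arrays of the
 * current kernel by calling print_group_shared_array on each array
 * reference group.
 */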
2409 static void print_shared_arrays(struct cuda_gen *gen)
2411 int i, j;
2413 for (i = 0; i < gen->n_array; ++i) {
2414 struct cuda_array_info *array = &gen->array[i];
2416 for (j = 0; j < array->n_group; ++j)
2417 print_group_shared_array(gen, array->groups[j]);
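/* Print the body of the kernel: the block and thread identifier
 * declarations, the shared memory and register array declarations and
 * the code generated by CLooG for the shared tile loops, in a context
 * where the outer host loop iterators have been turned into parameters
 * and the block identifiers into bounded parameters.
 */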
2421 static void print_kernel_body(struct cuda_gen *gen,
2422 __isl_keep isl_set *host_domain, __isl_keep isl_union_map *sched)
2424 isl_set *context;
2426 context = isl_set_copy(host_domain);
2427 context = parametrize(context, 0, gen->tile_first, "h");
2428 context = isl_set_project_out(context, isl_dim_set, 0, gen->tile_first);
2429 context = add_bounded_parameters(context,
2430 gen->n_grid, gen->grid_dim, "b");
2432 print_kernel_iterators(gen);
2433 print_shared_arrays(gen);
2435 fprintf(gen->cuda.kernel_c, "\n");
2437 print_cloog_kernel_body(gen, context, sched);
2439 isl_set_free(context);
2442 /* Given a constraint
2444 * a(p,i) + j = g f(e)
2446 * or -a(p,i) - j = g f(e) if sign < 0,
2447 * store a(p,i) in bound->shift and g (stride) in bound->stride.
2448 * a(p,i) is assumed to be an expression in only the parameters.
2450 static void extract_stride(__isl_keep isl_constraint *c,
2451 struct cuda_array_bound *bound, isl_int stride, int sign)
2453 int i;
2454 isl_int v;
2455 isl_int one;
2456 isl_dim *dim;
2457 unsigned nparam;
2458 isl_qpolynomial *qp;
2460 isl_int_set(bound->stride, stride);
2462 dim = isl_constraint_get_dim(c);
2463 dim = isl_dim_drop(dim, isl_dim_out, 0, 1);
2464 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
2465 dim = isl_dim_domain(dim);
2467 nparam = isl_dim_size(dim, isl_dim_param);
2469 isl_int_init(v);
2470 isl_int_init(one);
2471 isl_int_set_si(one, 1);
2473 isl_constraint_get_constant(c, &v);
2474 if (sign < 0)
2475 isl_int_neg(v, v);
2476 qp = isl_qpolynomial_rat_cst(isl_dim_copy(dim), v, one);
2478 for (i = 0; i < nparam; ++i) {
2479 isl_qpolynomial *t, *p;
2481 isl_constraint_get_coefficient(c, isl_dim_param, i, &v);
2482 if (isl_int_is_zero(v))
2483 continue;
2484 if (sign < 0)
2485 isl_int_neg(v, v);
2486 t = isl_qpolynomial_rat_cst(isl_dim_copy(dim), v, one);
2487 p = isl_qpolynomial_var(isl_dim_copy(dim), isl_dim_param, i);
2488 t = isl_qpolynomial_mul(t, p);
2489 qp = isl_qpolynomial_add(qp, t);
2492 isl_dim_free(dim);
2493 isl_int_clear(one);
2494 isl_int_clear(v);
2496 bound->shift = qp;
2499 /* Given an equality constraint of a map with a single output dimension j,
2500 * check if the constraint is of the form
2502 * a(p,i) + j = g f(e)
2504 * with a(p,i) an expression in the parameters and input dimensions
2505 * and f(e) an expression in the existentially quantified variables.
2506 * If so, and if g is larger than any such g from a previously considered
2507 * constraint, then call extract_stride to record the stride information
2508 * in bound.
2510 static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
2512 int i;
2513 isl_int v, stride;
2514 unsigned n_div;
2515 struct cuda_array_bound *bound = user;
2517 isl_int_init(v);
2518 isl_int_init(stride);
2520 n_div = isl_constraint_dim(c, isl_dim_div);
2521 isl_constraint_get_coefficient(c, isl_dim_out, 0, &v);
2523 if (n_div && (isl_int_is_one(v) || isl_int_is_negone(v))) {
2524 int s = isl_int_sgn(v);
2525 isl_int_set_si(stride, 0);
2526 for (i = 0; i < n_div; ++i) {
2527 isl_constraint_get_coefficient(c, isl_dim_div, i, &v);
2528 isl_int_gcd(stride, stride, v);
2530 if (!isl_int_is_zero(stride) &&
2531 isl_int_gt(stride, bound->stride))
2532 extract_stride(c, bound, stride, s);
2535 isl_int_clear(stride);
2536 isl_int_clear(v);
2538 isl_constraint_free(c);
2539 return 0;
2542 /* Given constraints on an array index i, check if we can find
2543 * a shift a(p) and a stride g such that
2545 * a(p) + i = 0 mod g
2547 * If so, record the information in bound and apply the mapping
2548 * i -> (i + a(p))/g to the array index in bounds and return
2549 * the new constraints.
2550 * If not, simply return the original constraints.
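 * For example, if the index satisfies i = 4 alpha + p for some
 * existentially quantified alpha, then we can take g = 4 and a(p) = -p,
 * and the index is replaced by (i - p)/4.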
2552 static __isl_give isl_basic_map *check_stride(struct cuda_gen *gen,
2553 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2555 isl_dim *dim;
2556 isl_basic_map *aff;
2557 isl_basic_map *shift;
2558 isl_qpolynomial *qp, *t;
2559 isl_int one;
2561 isl_int_set_si(bound->stride, -1);
2563 aff = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));
2565 isl_basic_map_foreach_constraint(aff, &check_stride_constraint, bound);
2567 isl_basic_map_free(aff);
2569 if (isl_int_is_neg(bound->stride))
2570 return bounds;
2572 qp = isl_qpolynomial_copy(bound->shift);
2573 qp = isl_qpolynomial_add_dims(qp, isl_dim_set, 1);
2574 dim = isl_qpolynomial_get_dim(qp);
2575 t = isl_qpolynomial_var(isl_dim_copy(dim), isl_dim_set, 0);
2576 qp = isl_qpolynomial_add(qp, t);
2577 isl_int_init(one);
2578 isl_int_set_si(one, 1);
2579 t = isl_qpolynomial_rat_cst(dim, one, bound->stride);
2580 isl_int_clear(one);
2581 qp = isl_qpolynomial_mul(qp, t);
2582 shift = isl_basic_map_from_qpolynomial(qp);
2584 bound->shift_map = isl_basic_map_copy(shift);
2585 bounds = isl_basic_map_apply_range(bounds, shift);
2587 return bounds;
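/* Data passed to compute_size_in_direction.
 * "bset" is the flattened wrapped access relation,
 * "bound" collects the size and lower bound found so far and
 * "pos" is the position of the array index within "bset".
 */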
2590 struct cuda_size_info {
2591 isl_basic_set *bset;
2592 struct cuda_array_bound *bound;
2593 int pos;
2596 /* Given a constraint from the basic set describing the bounds on
2597 * an array index, check if it is a lower bound, say m i >= b(x), and,
2598 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
2599 * upper bound. If so, and if this bound is smaller than any bound
2600 * derived from earlier constraints, set the size to this bound on
2601 * the expression and the lower bound to ceil(b(x)/m).
2603 static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
2605 struct cuda_size_info *size = user;
2606 unsigned nparam;
2607 unsigned n_div;
2608 isl_int v;
2610 nparam = isl_basic_set_dim(size->bset, isl_dim_param);
2611 n_div = isl_constraint_dim(c, isl_dim_div);
2613 if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div)) {
2614 isl_constraint_free(c);
2615 return 0;
2618 isl_int_init(v);
2620 isl_constraint_get_coefficient(c, isl_dim_set, size->pos, &v);
2622 if (isl_int_is_pos(v)) {
2623 isl_aff *aff;
2624 isl_aff *lb;
2625 enum isl_lp_result res;
2627 aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
2628 aff = isl_aff_ceil(aff);
2630 lb = isl_aff_copy(aff);
2632 aff = isl_aff_neg(aff);
2633 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, size->pos, 1);
2635 res = isl_basic_set_max(size->bset, aff, &v);
2636 isl_aff_free(aff);
2638 if (res == isl_lp_ok) {
2639 isl_int_add_ui(v, v, 1);
2640 if (isl_int_is_neg(size->bound->size) ||
2641 isl_int_lt(v, size->bound->size)) {
2642 isl_int_set(size->bound->size, v);
2643 lb = isl_aff_drop_dims(lb, isl_dim_set,
2644 0, size->pos + 1);
2645 isl_aff_free(size->bound->lb);
2646 size->bound->lb = isl_aff_copy(lb);
2649 isl_aff_free(lb);
2652 isl_int_clear(v);
2653 isl_constraint_free(c);
2655 return 0;
2658 /* Given a basic map "bounds" that maps parameters and input dimensions
2659 * to a single output dimension, look for an expression in the parameters
2660 * and input dimensions such that the range of the output dimension shifted
2661 * by this expression is a constant.
2663 * In particular, we currently only consider lower bounds on the output
2664 * dimension as candidate expressions.
2666 static int compute_array_dim_size(struct cuda_gen *gen,
2667 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2669 struct cuda_size_info size;
2671 bounds = check_stride(gen, bound, bounds);
2673 isl_int_set_si(bound->size, -1);
2674 bound->lb = NULL;
2676 size.bound = bound;
2677 size.pos = isl_basic_map_dim(bounds, isl_dim_in);
2678 size.bset = isl_basic_map_wrap(bounds);
2679 size.bset = isl_basic_set_flatten(size.bset);
2680 isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
2681 &size);
2682 isl_basic_set_free(size.bset);
2684 return isl_int_is_nonneg(bound->size) ? 0 : -1;
2687 /* Check if we can find a shared memory tile for the given array
2688 * based on the given accesses, and if so, put the results
2689 * in the given array of bounds.
2691 * We project the accesses on each index in turn and look for a parametric
2692 * offset such that the size is constant.
2694 static int can_tile_for_shared_memory(struct cuda_gen *gen,
2695 struct cuda_array_info *array, __isl_keep isl_map *access,
2696 struct cuda_array_bound *bounds)
2698 int i;
2700 for (i = 0; i < array->n_index; ++i) {
2701 isl_map *access_i;
2702 isl_basic_map *hull;
2704 access_i = isl_map_copy(access);
2705 access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
2706 access_i = isl_map_project_out(access_i, isl_dim_out,
2707 1, array->n_index - (i + 1));
2708 access_i = isl_map_compute_divs(access_i);
2709 hull = isl_map_simple_hull(access_i);
2710 if (compute_array_dim_size(gen, &bounds[i], hull) < 0)
2711 return 0;
2714 return 1;
2717 /* Construct a map with as input the shared tile loops and the loops that
2718 * will be wrapped around the threads, which relates these latter loops
2719 * to the thread indices and then projects them out.
2721 static __isl_give isl_map *compute_privatization(struct cuda_gen *gen)
2723 isl_map *priv;
2724 isl_map *tiling;
2725 isl_map *proj;
2726 isl_set *par;
2727 isl_dim *dim;
2729 dim = isl_union_map_get_dim(gen->shared_sched);
2731 if (gen->options->wrap)
2732 tiling = wrap(isl_dim_copy(dim), gen->shared_len + gen->n_block,
2733 gen->shared_len, gen->n_block, gen->block_dim);
2734 else
2735 tiling = tile(isl_dim_copy(dim), gen->shared_len + gen->n_block,
2736 gen->shared_len, gen->n_block, gen->block_dim);
2738 priv = tiling;
2740 par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
2741 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
2742 gen->n_block, "t");
2744 priv = isl_map_align_params(priv, isl_set_get_dim(par));
2745 priv = isl_map_intersect_range(priv, par);
2747 dim = isl_map_get_dim(priv);
2748 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
2749 dim = isl_dim_drop(dim, isl_dim_out, 0, isl_dim_size(dim, isl_dim_out));
2750 proj = projection(dim, gen->shared_len + 2 * gen->n_block,
2751 gen->shared_len);
2753 priv = isl_map_apply_range(priv, proj);
2755 return priv;
2758 /* Construct a map from domain_dim to domain_dim that increments
2759 * the dimension at position "pos" and leaves all other dimensions
2760 * constant.
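 * For example, for a three-dimensional space and pos = 1, the result is
 * the map { [i0, i1, i2] -> [i0, i1 + 1, i2] }.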
2762 static __isl_give isl_map *next(__isl_take isl_dim *domain_dim, int pos)
2764 int i;
2765 int len = isl_dim_size(domain_dim, isl_dim_set);
2766 isl_dim *dim;
2767 isl_basic_map *next;
2769 dim = isl_dim_map_from_set(domain_dim);
2770 next = isl_basic_map_universe(isl_dim_copy(dim));
2772 for (i = 0; i < len; ++i) {
2773 isl_constraint *c;
2775 c = isl_equality_alloc(isl_dim_copy(dim));
2776 isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
2777 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
2778 if (i == pos)
2779 isl_constraint_set_constant_si(c, 1);
2780 next = isl_basic_map_add_constraint(next, c);
2783 isl_dim_free(dim);
2785 return isl_map_from_basic_map(next);
2788 /* Check if the given access is coalesced.
2789 * That is, check whether incrementing the dimension that will get
2790 * wrapped over the last thread index results in incrementing
2791 * the last array index.
2793 * This function is only called for access relations without reuse.
2795 static int access_is_coalesced(struct cuda_gen *gen,
2796 __isl_keep isl_union_map *access)
2798 isl_dim *dim;
2799 isl_map *access_map;
2800 isl_map *next_thread_x;
2801 isl_map *next_element;
2802 isl_map *map;
2803 int coalesced;
2805 access = isl_union_map_copy(access);
2806 access = isl_union_map_apply_domain(access,
2807 isl_union_map_copy(gen->tiled_sched));
2808 access_map = isl_map_from_union_map(access);
2810 dim = isl_map_get_dim(access_map);
2811 dim = isl_dim_domain(dim);
2812 next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);
2814 dim = isl_map_get_dim(access_map);
2815 dim = isl_dim_range(dim);
2816 next_element = next(dim, isl_dim_size(dim, isl_dim_set) - 1);
2818 map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
2819 map = isl_map_apply_range(map, access_map);
2821 coalesced = isl_map_is_subset(map, next_element);
2823 isl_map_free(next_element);
2824 isl_map_free(map);
2826 return coalesced;
2829 /* For the given array reference group, check whether the access is private
2830 * to the thread. That is, check that any given array element
2831 * is only accessed by a single thread.
2832 * We compute an access relation that maps the shared tile loop iterators
2833 * and the shared point loop iterators that will be wrapped over the
2834 * threads to the array elements.
2835 * We actually check that those iterators that will be wrapped
2836 * partition the array space. This check is stricter than necessary
2837 * since several iterations may be mapped onto the same thread
2838 * and then they could be allowed to access the same memory elements,
2839 * but our check does not allow this situation.
2841 * We also check that the index expression only depends on parallel
2842 * loops. That way, we can move those loops innermost and unroll them.
2843 * Again, we use a test that is stricter than necessary.
2844 * We actually check whether the index expression only depends
2845 * on the iterators that are wrapped over the threads.
2846 * These are necessarily parallel, but there may be more parallel loops.
2848 * Combining the injectivity of the first test with the single-valuedness
2849 * of the second test, we simply test for bijectivity.
2851 * If it turns out we can use registers, we compute the private memory
2852 * tile size using can_tile_for_shared_memory, after introducing a dependence
2853 * on the thread indices.
2855 * Before performing any of the above computations, we first check
2856 * if there is any reuse on the reference group. If not, we simply
2857 * return. If, moreover, the access is coalesced then we also remove
2858 * the shared memory tiling since we should just use global memory instead.
2860 static void check_private_group_access(struct cuda_gen *gen,
2861 struct cuda_array_ref_group *group)
2863 isl_map *acc;
2864 isl_union_map *access;
2865 int n_index = group->array->n_index;
2867 access = group_access_relation(group, 1, 1);
2868 if (isl_union_map_is_injective(access)) {
2869 if (group->shared_bound && access_is_coalesced(gen, access)) {
2870 free_bound_list(group->shared_bound, n_index);
2871 group->shared_bound = NULL;
2873 isl_union_map_free(access);
2874 return;
2876 access = isl_union_map_apply_domain(access,
2877 isl_union_map_copy(gen->shared_sched));
2879 acc = isl_map_from_union_map(access);
2881 if (!isl_map_is_bijective(acc)) {
2882 isl_map_free(acc);
2883 return;
2886 group->private_bound = create_bound_list(gen->ctx, n_index);
2887 acc = isl_map_align_params(acc, isl_map_get_dim(gen->privatization));
2888 acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
2889 if (!can_tile_for_shared_memory(gen, group->array, acc,
2890 group->private_bound)) {
2891 free_bound_list(group->private_bound, n_index);
2892 group->private_bound = NULL;
2895 isl_map_free(acc);
2898 /* Look for the last shared tile loop that affects the offset of the
2899 * shared or private tile and store the result in array->last_shared.
2901 static void set_last_shared(struct cuda_gen *gen,
2902 struct cuda_array_ref_group *group)
2904 int i, j;
2905 struct cuda_array_bound *bounds;
2906 unsigned first_shared = gen->first_shared;
2907 int n_index = group->array->n_index;
2909 bounds = group->private_bound;
2910 if (!bounds)
2911 bounds = group->shared_bound;
2912 if (!bounds)
2913 return;
2915 for (j = gen->shared_len - 1; j >= 0; --j) {
2916 for (i = 0; i < n_index; ++i) {
2917 isl_aff *lb;
2918 isl_qpolynomial *shift;
2920 lb = bounds[i].lb;
2921 if (isl_aff_involves_dims(lb, isl_dim_param,
2922 first_shared + j, 1))
2923 break;
2925 shift = bounds[i].shift;
2926 if (!shift)
2927 continue;
2928 if (isl_qpolynomial_involves_dims(shift, isl_dim_param,
2929 first_shared + j, 1))
2930 break;
2932 if (i < n_index)
2933 break;
2935 group->array->last_shared = j;
2938 /* Compute the sizes of all private arrays for the current kernel,
2939 * as well as the offsets of the private pieces in the original arrays.
2940 * If we cannot or don't want to privatize a given array group,
2941 * we use the shared memory tile sizes computed in
2942 * compute_group_shared_bound instead.
2944 * If a given array only has a single reference group and if we have
2945 * been able to find a private or shared tile,
2946 * we also look for the last shared tile loop that affects the offset
2947 * (and therefore the array tile) and store the result in array->last_shared.
2949 * A privatized copy of all access relations from reference groups that
2950 * are mapped to private memory is stored in gen->private_access.
2952 static void compute_private_size(struct cuda_gen *gen)
2954 int i, j;
2955 isl_union_map *private;
2957 private = isl_union_map_empty(isl_union_map_get_dim(gen->shared_sched));
2959 for (i = 0; i < gen->n_array; ++i) {
2960 struct cuda_array_info *array = &gen->array[i];
2962 for (j = 0; j < array->n_group; ++j) {
2963 check_private_group_access(gen, array->groups[j]);
2965 if (!array->groups[j]->private_bound)
2966 continue;
2968 private = isl_union_map_union(private,
2969 group_access_relation(array->groups[j], 1, 1));
2972 array->last_shared = gen->shared_len - 1;
2973 array->print_shared_level = -1;
2975 if (array->n_group != 1)
2976 continue;
2977 set_last_shared(gen, array->groups[0]);
2980 if (isl_union_map_is_empty(private))
2981 isl_union_map_free(private);
2982 else {
2983 isl_union_map *priv;
2985 private = isl_union_map_apply_domain(private,
2986 isl_union_map_copy(gen->shared_sched));
2987 priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
2988 private = isl_union_map_apply_domain(private, priv);
2989 gen->private_access = private;
2993 /* Fill up the groups array with singleton groups, i.e., one group
2994 * per reference, initializing the array, access, write and refs fields.
2995 * In particular the access field is initialized to the scheduled
2996 * access relation of the array reference.
2998 * Return the number of elements initialized, i.e., the number of
2999 * active references in the current kernel.
3001 static int populate_array_references(struct cuda_gen *gen,
3002 struct cuda_array_info *array, __isl_keep isl_union_map *sched,
3003 struct cuda_array_ref_group **groups)
3005 int i;
3006 int n;
3007 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3009 n = 0;
3010 for (i = 0; i < array->n_ref; ++i) {
3011 isl_union_map *umap;
3012 isl_map *map;
3013 struct cuda_array_ref_group *group;
3014 struct cuda_stmt_access *access = array->refs[i];
3016 map = isl_map_copy(access->access);
3017 umap = isl_union_map_from_map(map);
3018 umap = isl_union_map_apply_domain(umap,
3019 isl_union_map_copy(sched));
3021 if (isl_union_map_is_empty(umap)) {
3022 isl_union_map_free(umap);
3023 continue;
3026 map = isl_map_from_union_map(umap);
3028 group = isl_calloc_type(ctx, struct cuda_array_ref_group);
3029 assert(group);
3030 group->array = array;
3031 group->access = map;
3032 group->write = access->write;
3033 group->refs = &array->refs[i];
3035 groups[n++] = group;
3038 return n;
3041 static void free_array_ref_group(struct cuda_array_ref_group *group,
3042 int n_index)
3044 if (!group)
3045 return;
3046 free_bound_list(group->shared_bound, n_index);
3047 free_bound_list(group->private_bound, n_index);
3048 isl_map_free(group->access);
3049 free(group->refs);
3050 free(group);
3053 /* If two groups have overlapping access relations and if one of them
3054 * involves a write, then merge the two groups into one.
3056 * We keep track of the grouping in "leader". leader[j] points to
3057 * an earlier group array element that belongs to the same group,
3058 * or the array element j itself if this element is the first in the group.
3060 * Return the number of group leaders.
3062 static int group_overlapping_writes(int n,
3063 struct cuda_array_ref_group **groups, int *leader)
3065 int i, j;
3066 int n_group = n;
3068 for (i = 0; i < n; ++i) {
3069 int l = i;
3070 groups[l]->n_ref = 1;
3071 for (j = i - 1; j >= 0; --j) {
3072 isl_map *map;
3073 int empty;
3075 if (leader[j] != j)
3076 continue;
3077 if (!groups[l]->write && !groups[j]->write)
3078 continue;
3080 map = isl_map_intersect(isl_map_copy(groups[l]->access),
3081 isl_map_copy(groups[j]->access));
3082 empty = isl_map_is_empty(map);
3083 isl_map_free(map);
3085 if (empty)
3086 continue;
3088 groups[j]->access = isl_map_union(groups[j]->access,
3089 groups[l]->access);
3090 groups[j]->write = 1;
3091 groups[l]->access = NULL;
3092 groups[j]->n_ref += groups[l]->n_ref;
3093 l = leader[l] = j;
3094 n_group--;
3096 leader[i] = l;
3099 return n_group;
3102 /* Compute the size of the shared array corresponding to the given
3103 * array reference group, based on the accesses from the current kernel,
3104 * as well as the offset of the shared piece in the original array.
3106 static void compute_group_shared_bound(struct cuda_gen *gen,
3107 struct cuda_array_info *array, struct cuda_array_ref_group *group)
3109 isl_ctx *ctx = isl_dim_get_ctx(array->dim);
3111 group->shared_bound = create_bound_list(ctx, array->n_index);
3112 if (!can_tile_for_shared_memory(gen, array, group->access,
3113 group->shared_bound)) {
3114 free_bound_list(group->shared_bound, array->n_index);
3115 group->shared_bound = NULL;
3119 /* Given an initial grouping of array references and shared memory tiles
3120 * for each group that allows for a shared memory tile, merge two groups
3121 * if both have a shared memory tile and if the merged group also has
3122 * a shared memory tile.
3124 * Return the number of group leaders after merging.
3126 static int group_common_shared_memory_tile(struct cuda_gen *gen,
3127 struct cuda_array_info *array, int n,
3128 struct cuda_array_ref_group **groups, int *leader, int n_group)
3130 int i, j;
3131 isl_ctx *ctx = isl_dim_get_ctx(array->dim);
3133 for (i = 0; n_group > 1 && i < n; ++i) {
3134 int l = i;
3135 if (leader[i] != i)
3136 continue;
3137 if (!groups[i]->shared_bound)
3138 continue;
3139 for (j = i - 1; j >= 0; --j) {
3140 isl_map *map;
3141 int empty;
3142 struct cuda_array_bound *shared_bound;
3144 if (leader[j] != j)
3145 continue;
3146 if (!groups[j]->shared_bound)
3147 continue;
3149 map = isl_map_intersect(isl_map_copy(groups[l]->access),
3150 isl_map_copy(groups[j]->access));
3151 empty = isl_map_is_empty(map);
3152 isl_map_free(map);
3154 if (empty)
3155 continue;
3157 map = isl_map_union(isl_map_copy(groups[l]->access),
3158 isl_map_copy(groups[j]->access));
3159 shared_bound = create_bound_list(ctx, array->n_index);
3160 if (!can_tile_for_shared_memory(gen, array, map,
3161 shared_bound)) {
3162 isl_map_free(map);
3163 free_bound_list(shared_bound, array->n_index);
3164 continue;
3167 free_bound_list(groups[j]->shared_bound,
3168 array->n_index);
3169 groups[j]->shared_bound = shared_bound;
3170 isl_map_free(groups[j]->access);
3171 groups[j]->access = map;
3172 groups[j]->n_ref += groups[l]->n_ref;
3173 l = leader[l] = j;
3174 n_group--;
3178 return n_group;
3181 /* Extract an array of array reference groups from the array of references
3182 * and the grouping information in "leader".
3184 * Store the results in array->n_group and array->groups.
3186 static void extract_array_groups(isl_ctx *ctx, struct cuda_array_info *array,
3187 int n, struct cuda_array_ref_group **groups, int *leader, int n_group)
3189 int i, j;
3191 for (i = 2; i < n; ++i)
3192 leader[i] = leader[leader[i]];
3194 array->n_group = n_group;
3195 array->groups = isl_alloc_array(ctx, struct cuda_array_ref_group *,
3196 n_group);
3197 assert(array->groups);
3199 j = 0;
3200 for (i = 0; i < n; ++i) {
3201 int k, l;
3202 struct cuda_stmt_access **refs;
3204 if (leader[i] != i) {
3205 groups[i]->refs = NULL;
3206 free_array_ref_group(groups[i], array->n_index);
3207 continue;
3210 refs = isl_alloc_array(ctx, struct cuda_stmt_access *,
3211 groups[i]->n_ref);
3212 assert(refs);
3213 l = 0;
3214 for (k = i; k < n; ++k)
3215 if (leader[k] == i) {
3216 refs[l++] = *groups[k]->refs;
3217 (*groups[k]->refs)->group = j;
3220 groups[i]->refs = refs;
3221 groups[i]->nr = j;
3222 array->groups[j++] = groups[i];
3226 /* Group array references that should be considered together when
3227 * deciding whether to access them from private, shared or global memory.
3229 * In particular, if two array references overlap and if one of them
3230 * is a write, then the two references are grouped together.
3231 * Furthermore, if two groups admit a shared memory tile and if the
3232 * combination of the two also admits a shared memory tile, we merge
3233 * the two groups.
3235 * During the construction the group->refs field points to a single
3236 * array reference inside the array of array references, while
3237 * group->n_ref contains the number of elements in leader that
3238 * (directly or indirectly) point to this group, provided the group
3239 * is a leader.
3241 static void group_array_references(struct cuda_gen *gen,
3242 struct cuda_array_info *array, __isl_keep isl_union_map *sched)
3244 int i;
3245 int n, n_group;
3246 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3247 struct cuda_array_ref_group **groups;
3248 int *leader;
3250 groups = isl_calloc_array(ctx, struct cuda_array_ref_group *,
3251 array->n_ref);
3252 assert(groups);
3254 n = populate_array_references(gen, array, sched, groups);
3256 leader = isl_alloc_array(ctx, int, n);
3257 assert(leader);
3259 n_group = group_overlapping_writes(n, groups, leader);
3261 for (i = 0; i < n; ++i)
3262 if (leader[i] == i)
3263 compute_group_shared_bound(gen, array, groups[i]);
3265 n_group = group_common_shared_memory_tile(gen, array, n, groups,
3266 leader, n_group);
3268 extract_array_groups(ctx, array, n, groups, leader, n_group);
3270 free(leader);
3271 free(groups);
3274 /* Take tiled_sched, project it onto the shared tile loops and
3275 * the loops that will be wrapped over the threads,
3276 * parametrize the shared tile loops and store the result in gen->shared_sched.
3277 * The position of the first of these parameters is stored in gen->first_shared.
3278 * Also compute a projection that projects out the loops that will be
3279 * wrapped over the threads and store this projection in gen->shared_proj.
3281 static void compute_shared_sched(struct cuda_gen *gen)
3283 isl_dim *dim;
3284 isl_map *proj;
3285 isl_set *par;
3286 isl_union_map *sched;
3288 sched = isl_union_map_copy(gen->tiled_sched);
3290 dim = isl_union_map_get_dim(sched);
3291 gen->first_shared = isl_dim_size(dim, isl_dim_param);
3292 proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
3293 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
3295 dim = isl_union_map_get_dim(sched);
3296 par = parametrization(dim, gen->shared_len + gen->n_block,
3297 0, gen->shared_len, "g");
3298 sched = isl_union_map_intersect_range(sched,
3299 isl_union_set_from_set(par));
3301 dim = isl_union_map_get_dim(sched);
3302 proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);
3304 gen->shared_sched = sched;
3305 gen->shared_proj = isl_union_map_from_map(proj);
3308 /* Group references of all arrays in the program.
3310 static void group_references(struct cuda_gen *gen)
3312 int i;
3313 isl_union_map *sched;
3315 sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
3316 isl_union_map_copy(gen->shared_proj));
3318 for (i = 0; i < gen->n_array; ++i)
3319 group_array_references(gen, &gen->array[i], sched);
3321 isl_union_map_free(sched);
3324 /* Free all array information that is local to the current kernel.
3326 static void free_local_array_info(struct cuda_gen *gen)
3328 int i, j;
3330 for (i = 0; i < gen->n_array; ++i) {
3331 struct cuda_array_info *array = &gen->array[i];
3333 for (j = 0; j < array->n_group; ++j)
3334 free_array_ref_group(array->groups[j], array->n_index);
3335 free(array->groups);
3337 if (array->n_group == 0)
3338 continue;
3339 for (j = 0; j < gen->array[i].n_index; ++j) {
3340 isl_pw_aff_free(gen->array[i].local_bound[j]);
3341 gen->array[i].local_bound[j] = NULL;
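/* Print a parenthesized, comma separated list of iterators named
 * <prefix>0, <prefix>1, ...; if "parens" is set, each individual
 * iterator is additionally surrounded by its own pair of parentheses.
 */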
3346 static void print_iterator_list(FILE *out, int len, const char *prefix,
3347 int parens)
3349 int i;
3351 fprintf(out, "(");
3352 for (i = 0; i < len; ++i) {
3353 if (i)
3354 fprintf(out, ", ");
3355 if (parens)
3356 fprintf(out, "(%s%d)", prefix, i);
3357 else
3358 fprintf(out, "%s%d", prefix, i);
3360 fprintf(out, ")");
3363 /* Print an access to the element in the global memory copy of the
3364 * given array that corresponds to element [a0][a1]... of the original array.
3365 * The copy in global memory has been linearized, so we need to take
3366 * the array size into account.
3368 static void print_global_index(isl_ctx *ctx, FILE *out,
3369 struct cuda_array_info *array)
3371 int i;
3372 isl_printer *prn;
3374 fprintf(out, "%s[", array->name);
3375 for (i = 0; i + 1 < array->n_index; ++i)
3376 fprintf(out, "(");
3377 for (i = 0; i < array->n_index; ++i) {
3378 if (i) {
3379 prn = isl_printer_to_file(ctx, out);
3380 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3381 prn = isl_printer_print_str(prn, ") * (");
3382 prn = isl_printer_print_pw_aff(prn,
3383 array->local_bound[i]);
3384 prn = isl_printer_print_str(prn, ") + ");
3385 isl_printer_free(prn);
3387 fprintf(out, "a%d", i);
3389 fprintf(out, "]");
3392 /* Print an access to the element in the shared memory copy of the
3393 * given array that corresponds to element [a0][a1]... of the original array.
3394 * Since the array in shared memory is just a shifted copy of part
3395 * of the original array, we simply need to subtract the lower bound,
3396 * which was computed in can_tile_for_shared_memory.
3397 * If any of the indices is strided, then we first add
3398 * shared_bound[i].shift and divide by shared_bound[i].stride.
3400 static void print_local_index(FILE *out, struct cuda_array_ref_group *group)
3402 int i;
3403 isl_ctx *ctx;
3404 isl_printer *prn;
3405 struct cuda_array_bound *bounds = group->shared_bound;
3407 ctx = isl_dim_get_ctx(group->array->dim);
3408 print_array_name(out, group);
3409 for (i = 0; i < group->array->n_index; ++i) {
3410 fprintf(out, "[(a%d", i);
3411 if (bounds[i].shift) {
3412 fprintf(out, " + (");
3413 prn = isl_printer_to_file(ctx, out);
3414 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3415 prn = isl_printer_print_qpolynomial(prn,
3416 bounds[i].shift);
3417 prn = isl_printer_print_str(prn, "))/");
3418 prn = isl_printer_print_isl_int(prn,
3419 bounds[i].stride);
3420 isl_printer_free(prn);
3421 } else
3422 fprintf(out, ")");
3423 fprintf(out, " - (");
3424 prn = isl_printer_to_file(ctx, out);
3425 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3426 prn = isl_printer_print_aff(prn, bounds[i].lb);
3427 isl_printer_free(prn);
3428 fprintf(out, ")]");
3432 /* Print '#define's for copying data from global memory to shared
3433 * memory and back for the given array.
3435 static void print_array_copy_defines(struct cuda_gen *gen,
3436 struct cuda_array_ref_group *group)
3438 int i;
3439 const char *type[] = { "read", "write" };
3440 struct cuda_array_info *array = group->array;
3441 int n_index = array->n_index;
3443 for (i = 0; i < 2; ++i) {
3444 fprintf(gen->cuda.kernel_c, "#define %s_", type[i]);
3445 print_array_name(gen->cuda.kernel_c, group);
3446 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 0);
3447 fprintf(gen->cuda.kernel_c, " %s_", type[i]);
3448 print_array_name(gen->cuda.kernel_c, group);
3449 fprintf(gen->cuda.kernel_c, "_");
3450 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 1);
3451 fprintf(gen->cuda.kernel_c, "\n");
3453 fprintf(gen->cuda.kernel_c, "#define %s_", type[i]);
3454 print_array_name(gen->cuda.kernel_c, group);
3455 fprintf(gen->cuda.kernel_c, "_");
3456 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 0);
3457 if (i) {
3458 fprintf(gen->cuda.kernel_c, " ");
3459 print_global_index(gen->ctx, gen->cuda.kernel_c, array);
3460 fprintf(gen->cuda.kernel_c, " = ");
3461 print_local_index(gen->cuda.kernel_c, group);
3462 } else {
3463 fprintf(gen->cuda.kernel_c, " ");
3464 print_local_index(gen->cuda.kernel_c, group);
3465 fprintf(gen->cuda.kernel_c, " = ");
3466 print_global_index(gen->ctx, gen->cuda.kernel_c, array);
3468 fprintf(gen->cuda.kernel_c, "\n");
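/* Print the copying '#define's of all array reference groups that are
 * mapped to shared memory; groups mapped to registers are skipped.
 */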
3472 static void print_copy_defines(struct cuda_gen *gen)
3474 int i, j;
3476 for (i = 0; i < gen->n_array; ++i) {
3477 struct cuda_array_info *array = &gen->array[i];
3479 for (j = 0; j < array->n_group; ++j) {
3480 if (array->groups[j]->private_bound)
3481 continue;
3482 if (!array->groups[j]->shared_bound)
3483 continue;
3484 print_array_copy_defines(gen, array->groups[j]);
3489 /* The sizes of the arrays on the host that have been computed by
3490 * extract_array_info may depend on the parameters. Use the extra
3491 * constraints on the parameters that are valid at "host_domain"
3492 * to simplify these expressions.
3494 static void localize_bounds(struct cuda_gen *gen,
3495 __isl_keep isl_set *host_domain)
3497 int i, j;
3498 isl_set *context;
3499 unsigned nvar;
3501 context = isl_set_copy(host_domain);
3502 nvar = isl_set_dim(host_domain, isl_dim_set);
3503 context = isl_set_project_out(context, isl_dim_set, 0, nvar);
3505 for (i = 0; i < gen->n_array; ++i) {
3506 struct cuda_array_info *array = &gen->array[i];
3508 if (array->n_group == 0)
3509 continue;
3511 for (j = 0; j < array->n_index; ++j) {
3512 isl_pw_aff *pwaff;
3514 pwaff = isl_pw_aff_copy(array->bound[j]);
3515 pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context));
3516 array->local_bound[j] = pwaff;
3519 isl_set_free(context);
3522 /* Set gen->tile_len and gen->n_parallel to those of the first statement
3523 * in the statement list u.
3524 * Because of the way the schedule is constructed, the other statements
3525 * in the list, if any, should have the same values for these properties.
3527 static void set_tile_len(struct cuda_gen *gen, struct clast_user_stmt *u)
3529 int nr;
3530 struct cuda_stmt *stmt;
3532 nr = atoi(u->statement->name + 2);
3533 stmt = &gen->stmts[nr];
3535 gen->tile_len = stmt->tile_len;
3536 gen->n_parallel = stmt->n_parallel;
3539 /* This function is called for each leaf in the clast of the host code.
3540 * We first specialize the schedule to the site of the leaf, compute
3541 * the size of shared memory and then print the body of host code
3542 * and the associated kernel (through a call to print_kernel_body).
3544 static void print_host_user(struct gpucode_info *code,
3545 struct clast_user_stmt *u)
3547 struct cuda_gen *gen = code->user;
3548 isl_dim *dim;
3549 isl_set *par;
3550 isl_set *host_domain;
3551 isl_union_map *access;
3552 isl_union_map *local_sched;
3553 isl_union_set *arrays;
3555 set_tile_len(gen, u);
3556 read_sizes(gen);
3558 host_domain = extract_entire_host_domain(u);
3560 local_sched = isl_union_map_intersect_range(
3561 isl_union_map_copy(gen->sched),
3562 isl_union_set_from_set(extend(isl_set_copy(host_domain),
3563 gen->untiled_len)));
3564 access = isl_union_map_union(isl_union_map_copy(gen->read),
3565 isl_union_map_copy(gen->write));
3566 access = isl_union_map_apply_domain(access,
3567 isl_union_map_copy(local_sched));
3568 arrays = isl_union_map_range(access);
3570 print_indent(code->dst, code->indent);
3571 fprintf(code->dst, "dim3 k%d_dimBlock(", gen->kernel_id);
3572 print_reverse_list(code->dst, gen->n_block, gen->block_dim);
3573 fprintf(code->dst, ");\n");
3575 print_indent(code->dst, code->indent);
3576 fprintf(code->dst, "dim3 k%d_dimGrid(", gen->kernel_id);
3577 print_reverse_list(code->dst, gen->n_grid, gen->grid_dim);
3578 fprintf(code->dst, ");\n");
3580 gen->tiled_sched = tile_schedule(gen, local_sched);
3581 gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
3582 gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);
3584 gen->local_sched = isl_union_map_copy(gen->tiled_sched);
3586 dim = isl_union_map_get_dim(gen->local_sched);
3587 par = parametrization(dim, gen->tiled_len, 0, gen->shared_len, "g");
3588 gen->local_sched = isl_union_map_intersect_range(gen->local_sched,
3589 isl_union_set_from_set(par));
3591 gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
3592 gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);
3594 gen->private_access = NULL;
3595 compute_shared_sched(gen);
3596 gen->privatization = compute_privatization(gen);
3597 group_references(gen);
3598 compute_private_size(gen);
3599 localize_bounds(gen, host_domain);
3601 gen->local_sched = interchange_for_unroll(gen, gen->local_sched);
3603 print_copy_defines(gen);
3604 print_kernel_launch(gen, arrays);
3606 fprintf(gen->cuda.kernel_c, "{\n");
3608 print_kernel_body(gen, host_domain, gen->tiled_sched);
3610 fprintf(gen->cuda.kernel_c, "}\n");
3612 free_local_array_info(gen);
3613 isl_map_free(gen->privatization);
3614 isl_union_map_free(gen->private_access);
3615 isl_union_map_free(gen->local_sched);
3616 isl_union_map_free(gen->tiled_sched);
3617 isl_union_map_free(gen->shared_sched);
3618 isl_union_map_free(gen->shared_proj);
3619 isl_union_set_free(arrays);
3620 isl_set_free(host_domain);
3622 free(gen->tile_size);
3623 gen->kernel_id++;
3626 /* Use CLooG to generate code for the outer gen->tile_first loops
3627 * of the global schedule in gen->sched.
3628 * The pretty printing of this code is handled by gpu_print_host_stmt,
3629 * which calls print_host_user for each kernel invocation location.
3631 static void print_cloog_host_code(struct cuda_gen *gen)
3633 int i;
3634 isl_set *context;
3635 isl_union_map *sched;
3636 CloogOptions *options;
3637 CloogDomain *cloog_context;
3638 CloogUnionDomain *ud;
3639 CloogInput *input;
3640 struct clast_stmt *stmt;
3641 char name[20];
3643 options = cloog_options_malloc(gen->state);
3644 options->language = LANGUAGE_C;
3645 options->otl = 0;
3646 options->strides = 1;
3647 options->stop = gen->tile_first;
3648 options->f = gen->untiled_len;
3649 options->l = gen->untiled_len;
3650 options->save_domains = 1;
3651 options->noscalars = 1;
3653 sched = isl_union_map_copy(gen->sched);
3654 ud = cloog_union_domain_from_isl_union_map(sched);
3655 for (i = 0; i < options->stop; ++i) {
3656 snprintf(name, sizeof(name), "h%d", i);
3657 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
3659 context = isl_set_copy(gen->context);
3660 cloog_context = cloog_domain_from_isl_set(context);
3661 input = cloog_input_alloc(cloog_context, ud);
3663 stmt = cloog_clast_create_from_input(input, options);
3665 gen->code.indent = 0;
3666 gen->code.dst = gen->cuda.host_c;
3667 gen->code.print_user_stmt = NULL;
3668 gen->code.print_user_stmt_list = &print_host_user;
3669 gen->code.print_for_head = NULL;
3670 gen->code.print_for_foot = NULL;
3671 gen->code.user = gen;
3672 gpu_print_host_stmt(&gen->code, stmt);
3674 cloog_clast_free(stmt);
3675 cloog_options_free(options);
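/* Print the entire host code: declare and allocate the device arrays,
 * copy the arrays to the device, print the code generated by CLooG for
 * the host loops (which in turn prints the kernels and their launches
 * through print_host_user), copy the results back and free the
 * device arrays.
 */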
3678 void print_host_code(struct cuda_gen *gen)
3680 fprintf(gen->cuda.host_c, "{\n");
3681 print_cloog_macros(gen->cuda.host_c);
3682 print_cloog_macros(gen->cuda.kernel_c);
3684 declare_device_arrays(gen);
3686 allocate_device_arrays(gen);
3687 copy_arrays_to_device(gen);
3689 gen->kernel_id = 0;
3690 print_cloog_host_code(gen);
3692 copy_arrays_from_device(gen);
3693 free_device_arrays(gen);
3695 fprintf(gen->cuda.host_c, "}\n");
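/* If "str" is non-NULL, parse it as an extra context and intersect
 * "set" with this context after aligning the parameters.
 */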
3698 __isl_give isl_set *add_context_from_str(__isl_take isl_set *set,
3699 const char *str)
3701 isl_ctx *ctx;
3702 isl_set *context;
3704 if (!str)
3705 return set;
3707 ctx = isl_set_get_ctx(set);
3708 context = isl_set_read_from_str(ctx, str, -1);
3709 context = isl_set_align_params(context, isl_set_get_dim(set));
3710 set = isl_set_intersect(set, context);
3712 return set;
3715 /* Return the union of all iteration domains of the gen->stmts[i].
3717 static __isl_give isl_union_set *extract_domain(struct cuda_gen *gen)
3719 int i;
3720 isl_union_set *domain;
3722 domain = isl_union_set_empty(isl_set_get_dim(gen->context));
3723 for (i = 0; i < gen->n_stmts; ++i) {
3724 isl_set *domain_i;
3726 domain_i = isl_set_copy(gen->stmts[i].domain);
3727 domain = isl_union_set_union(domain,
3728 isl_union_set_from_set(domain_i));
3731 return domain;
3734 /* Information about the outermost tilable bands in the forest of bands.
3736 * tile_len and n_parallel are only set on band_info structures
3737 * that correspond to outermost bands. For other bands (in particular,
3738 * ancestors of the outermost bands), n_parallel is set to 0.
3740 * prefix is the (padded) schedule leading up to the outermost tilable bands.
3742 * tile_first is the number of schedule dimensions in prefix.
3744 * suffix is the schedule of the outermost tilable bands and their descendants.
3746 struct band_info {
3747 struct cuda_gen *gen;
3748 int tile_first;
3749 int tile_len;
3750 int n_parallel;
3751 isl_union_map *prefix;
3752 isl_union_map *suffix;
3755 /* Set tile_len and n_parallel of the statement to that of
3756 * their outermost band, recorded in the band_info.
3758 static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
3760 struct band_info *info = user;
3761 int nr;
3762 struct cuda_stmt *stmt;
3764 nr = atoi(isl_map_get_tuple_name(map, isl_dim_in) + 2);
3765 stmt = &info->gen->stmts[nr];
3767 stmt->tile_len = info->tile_len;
3768 stmt->n_parallel = info->n_parallel;
3770 isl_map_free(map);
3772 return 0;
3775 static void list_select_outer_band(struct cuda_gen *gen,
3776 __isl_take isl_band_list *list, int pos, struct band_info *list_info);
3778 /* Check if this band has any parallel loops. If so, take it as
3779 * the outermost tilable band. If not, continue looking for the
3780 * outermost tilable band in the children of the current band.
3782 static void band_select_outer_band(struct cuda_gen *gen,
3783 __isl_take isl_band *band, int pos, struct band_info *info)
3785 int n = isl_band_n_member(band);
3786 int n_parallel;
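/* Count the initial band members with a zero dependence distance,
 * i.e., the loops that can be executed in parallel. */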
3788 for (n_parallel = 0; n_parallel < n; ++n_parallel)
3789 if (!isl_band_member_is_zero_distance(band, n_parallel))
3790 break;
3792 info->n_parallel = n_parallel;
3793 if (n_parallel) {
3794 info->gen = gen;
3795 info->tile_first = pos;
3796 info->tile_len = n;
3797 info->prefix = isl_band_get_prefix_schedule(band);
3798 info->suffix = isl_union_map_flat_range_product(
3799 isl_band_get_partial_schedule(band),
3800 isl_band_get_suffix_schedule(band));
3801 isl_union_map_foreach_map(info->prefix,
3802 &set_stmt_tile_len, info);
3803 } else {
3804 isl_band_list *children;
3805 assert(isl_band_has_children(band));
3806 children = isl_band_get_children(band);
3807 list_select_outer_band(gen, children, pos + n, info);
3810 isl_band_free(band);
3813 /* Comparison function that returns a non-zero value for band_infos
3814 * with different tile_len fields or different n_parallel fields.
3816 static int cmp_band(const void *p1, const void *p2)
3818 const struct band_info *info1 = p1;
3819 const struct band_info *info2 = p2;
3821 if (info1->tile_len != info2->tile_len)
3822 return info1->tile_len - info2->tile_len;
3824 return info1->n_parallel - info2->n_parallel;
3827 /* Extend "umap" with coordinates with fixed value "val"
3828 * to a total length of "dst_len", assuming the original dimension is "src_len".
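 * For example (illustrative), with src_len = 2, dst_len = 3 and val = 5,
 * a map { S[i] -> [i, 0] } would become { S[i] -> [i, 0, 5] }.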
3830 static __isl_give isl_union_map *extend_range(__isl_take isl_union_map *umap,
3831 int src_len, int dst_len, int val)
3833 isl_dim *dim;
3834 isl_map *map;
3835 int i;
3837 dim = isl_union_map_get_dim(umap);
3838 map = isl_map_reverse(projection(dim, dst_len, src_len));
3839 for (i = src_len; i < dst_len; ++i)
3840 map = isl_map_fix_si(map, isl_dim_out, i, val);
3842 umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));
3844 return umap;
3847 /* Group bands with the same values for tile_len and n_parallel.
3848 * The prefix schedule is then extended with a fixed coordinate that
3849 * is different for each such group.
3850 * Note that the actual values for this coordinate are not important.
3851 * The bands have already been effectively separated at a higher level
3852 * or they are independent and may be executed in parallel.
3853 The list of band_info has been sorted before this function is called.
3855 static void separate_bands(struct band_info *info, int n)
3857 int i;
3858 int j = 0;
3860 for (i = 0; i < n; ++i) {
3861 int l = info[i].tile_first;
3863 if (i &&
3864 (info[i].tile_len != info[i - 1].tile_len ||
3865 info[i].n_parallel != info[i - 1].n_parallel))
3866 j++;
3868 info[i].prefix = extend_range(info[i].prefix,
3869 l, l + 1, j);
3870 info[i].tile_first = l + 1;
3874 /* Select the outermost bands in the elements of the list, align
3875 * their prefix schedules, separate bands with different values
3876 * for tile_len and/or n_parallel and then combine the resulting
3877 * prefix and suffix schedules into a single pair of prefix and
3878 * suffix schedules for the entire list.
3880 static void list_select_outer_band(struct cuda_gen *gen,
3881 __isl_take isl_band_list *list, int pos, struct band_info *list_info)
3883 isl_band *band;
3884 int i;
3885 int n = isl_band_list_n_band(list);
3886 isl_ctx *ctx = isl_band_list_get_ctx(list);
3887 struct band_info *info;
3888 int max_tile_first;
3889 isl_union_map *prefix;
3890 isl_union_map *suffix;
3892 assert(n >= 1);
3893 info = isl_calloc_array(ctx, struct band_info, n);
3894 assert(info);
3896 max_tile_first = 0;
3897 for (i = 0; i < n; ++i) {
3898 band = isl_band_list_get_band(list, i);
3899 band_select_outer_band(gen, band, pos, &info[i]);
3900 if (info[i].tile_first > max_tile_first)
3901 max_tile_first = info[i].tile_first;
3904 for (i = 0; i < n; ++i) {
3905 if (info[i].tile_first == max_tile_first)
3906 continue;
3907 info[i].prefix = extend_range(info[i].prefix,
3908 info[i].tile_first, max_tile_first, 0);
3911 qsort(info, n, sizeof(struct band_info), &cmp_band);
3913 for (i = 0; i < n - 1; ++i)
3914 if (info[i].tile_len != info[i + 1].tile_len ||
3915 info[i].n_parallel != info[i + 1].n_parallel)
3916 break;
3918 if (i < n - 1)
3919 separate_bands(info, n);
3921 prefix = info[0].prefix;
3922 suffix = info[0].suffix;
3924 for (i = 1; i < n; ++i) {
3925 prefix = isl_union_map_union(prefix, info[i].prefix);
3926 suffix = isl_union_map_union(suffix, info[i].suffix);
3929 list_info->tile_first = info[0].tile_first;
3930 list_info->tile_len = -1;
3931 list_info->prefix = prefix;
3932 list_info->suffix = suffix;
3934 isl_band_list_free(list);
3935 free(info);
3938 /* Set max_out to the maximal number of output dimensions over
3939 * all maps.
3941 static int update_max_out(__isl_take isl_map *map, void *user)
3943 int *max_out = user;
3944 int n_out = isl_map_dim(map, isl_dim_out);
3946 if (n_out > *max_out)
3947 *max_out = n_out;
3949 isl_map_free(map);
3950 return 0;
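/* Data used by align_range: "max_out" is the maximal number of output
 * dimensions over all maps and "res" collects the maps after their
 * ranges have been extended to that dimension. */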
3953 struct align_range_data {
3954 int max_out;
3955 isl_union_map *res;
3958 /* Extend the dimension of the range of the given map to data->max_out and
3959 * then add the result to data->res.
3961 static int map_align_range(__isl_take isl_map *map, void *user)
3963 struct align_range_data *data = user;
3964 int i;
3965 isl_dim *dim;
3966 isl_map *proj;
3967 int n_out = isl_map_dim(map, isl_dim_out);
3969 dim = isl_union_map_get_dim(data->res);
3970 proj = isl_map_reverse(projection(dim, data->max_out, n_out));
3971 for (i = n_out; i < data->max_out; ++i)
3972 proj = isl_map_fix_si(proj, isl_dim_out, i, 0);
3974 map = isl_map_apply_range(map, proj);
3976 data->res = isl_union_map_add_map(data->res, map);
3978 return 0;
3981 /* Extend the ranges of the maps in the union map such that they all have
3982 * the same dimension.
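 * For example (illustrative), { A[] -> [0]; B[] -> [1, 2] } would become
 * { A[] -> [0, 0]; B[] -> [1, 2] }.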
3984 static __isl_give isl_union_map *align_range(__isl_take isl_union_map *umap)
3986 struct align_range_data data;
3988 data.max_out = 0;
3989 isl_union_map_foreach_map(umap, &update_max_out, &data.max_out);
3991 data.res = isl_union_map_empty(isl_union_map_get_dim(umap));
3992 isl_union_map_foreach_map(umap, &map_align_range, &data);
3994 isl_union_map_free(umap);
3995 return data.res;
3998 /* Select the outermost tilable band that (by construction)
3999 * has at least one parallel loop.
4000 The starting position of the aligned band is stored in
4001 gen->tile_first.
4002 * The sizes and number of parallel loops may be different in different
4003 * parts of the band forest and are therefore stored in the cuda_stmts.
4005 * Return the complete schedule, with the tilable bands aligned
4006 * at gen->tile_first and padded with zero, if needed.
4008 static __isl_give isl_union_map *select_outer_tilable_band(struct cuda_gen *gen,
4009 __isl_keep isl_schedule *schedule)
4011 isl_band_list *list;
4012 struct band_info info;
4014 gen->n_parallel = 0;
4015 gen->tile_len = -1;
4017 list = isl_schedule_get_band_forest(schedule);
4019 list_select_outer_band(gen, list, 0, &info);
4021 gen->tile_first = info.tile_first;
4022 info.suffix = align_range(info.suffix);
4024 return isl_union_map_flat_range_product(info.prefix, info.suffix);
4027 /* Set gen->untiled_len to the number of scheduling dimensions
4028 * for the schedule of the first domain.
4029 * We assume here that this number is the same for all domains.
4031 static int set_untiled_len(__isl_take isl_map *map, void *user)
4033 unsigned *untiled_len = user;
4035 *untiled_len = isl_map_dim(map, isl_dim_out);
4037 isl_map_free(map);
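/* Return a negative value to stop the iteration after the first map. */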
4038 return -1;
4041 /* Compute an appropriate schedule based on the accesses in
4042 * gen->read and gen->write.
4044 * We first compute dependences and then use those to compute
4045 * a schedule that has a parallel loop in each tilable band.
4046 * Finally, we select the outermost tilable band.
4048 static void compute_schedule(struct cuda_gen *gen,
4049 __isl_take isl_union_map *sched)
4051 isl_ctx *ctx = isl_union_map_get_ctx(sched);
4052 isl_union_set *domain;
4053 isl_union_map *empty;
4054 isl_union_map *dep_raw, *dep2, *dep3, *dep;
4055 isl_union_map *uninitialized;
4056 isl_schedule *schedule;
4057 struct isl_options *options;
4059 empty = isl_union_map_empty(isl_union_map_get_dim(sched));
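/* The first call computes the flow (read-after-write) dependences and the
 * uninitialized reads; the second computes the false (write-after-write
 * and write-after-read) dependences. */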
4061 isl_union_map_compute_flow(isl_union_map_copy(gen->read),
4062 isl_union_map_copy(gen->write), empty,
4063 isl_union_map_copy(sched),
4064 &dep_raw, NULL, &uninitialized, NULL);
4065 isl_union_map_compute_flow(isl_union_map_copy(gen->write),
4066 isl_union_map_copy(gen->write),
4067 isl_union_map_copy(gen->read),
4068 isl_union_map_copy(sched),
4069 &dep2, &dep3, NULL, NULL);
4070 isl_union_map_free(sched);
4072 gen->copy_in = isl_union_map_range(uninitialized);
4074 dep = isl_union_map_union(dep2, dep3);
4075 dep = isl_union_map_union(dep, dep_raw);
4076 dep = isl_union_map_coalesce(dep);
4078 domain = extract_domain(gen);
4079 options = isl_ctx_peek_options(ctx, isl_options_arg);
4080 options->schedule_outer_zero_distance = 1;
4081 schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
4082 isl_union_map_copy(dep), dep);
4084 sched = select_outer_tilable_band(gen, schedule);
4086 isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
4087 sched = isl_union_map_intersect_domain(sched, domain);
4088 gen->sched = sched;
4090 isl_schedule_free(schedule);
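/* Extract a single access from "expr", append it to the list pointed to
 * by "next_access" and return a pointer to the new end of the list. */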
4093 static struct cuda_stmt_access **expr_extract_access(struct pet_expr *expr,
4094 struct cuda_stmt_access **next_access)
4096 struct cuda_stmt_access *access;
4097 isl_ctx *ctx = isl_map_get_ctx(expr->acc.access);
4099 access = isl_alloc_type(ctx, struct cuda_stmt_access);
4100 assert(access);
4101 access->next = NULL;
4102 access->read = expr->acc.read;
4103 access->write = expr->acc.write;
4104 access->access = isl_map_copy(expr->acc.access);
4106 *next_access = access;
4107 next_access = &(*next_access)->next;
4108 return next_access;
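/* Recursively extract all accesses in "expr" (arguments first) and append
 * them to the list pointed to by "next_access".
 * Return a pointer to the end of the list. */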
4111 static struct cuda_stmt_access **expr_extract_accesses(struct pet_expr *expr,
4112 struct cuda_stmt_access **next_access)
4114 int i;
4116 for (i = 0; i < expr->n_arg; ++i)
4117 next_access = expr_extract_accesses(expr->args[i],
4118 next_access);
4120 if (expr->type == pet_expr_access)
4121 next_access = expr_extract_access(expr, next_access);
4123 return next_access;
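/* Collect all accesses in the body of "stmt" in the stmt->accesses list. */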
4126 static void pet_stmt_extract_accesses(struct cuda_stmt *stmt)
4128 struct cuda_stmt_access **next_access = &stmt->accesses;
4130 stmt->accesses = NULL;
4131 expr_extract_accesses(stmt->body, next_access);
4134 /* Return an array of cuda_stmt representing the statements in "scop".
4136 static struct cuda_stmt *extract_stmts(isl_ctx *ctx, struct pet_scop *scop,
4137 __isl_keep isl_set *context)
4139 int i;
4140 struct cuda_stmt *stmts;
4142 stmts = isl_calloc_array(ctx, struct cuda_stmt, scop->n_stmt);
4143 assert(stmts);
4145 for (i = 0; i < scop->n_stmt; ++i) {
4146 struct cuda_stmt *s = &stmts[i];
4148 s->domain = isl_set_copy(scop->stmts[i]->domain);
4149 s->domain = isl_set_intersect(s->domain, isl_set_copy(context));
4150 s->body = scop->stmts[i]->body;
4151 pet_stmt_extract_accesses(s);
4154 return stmts;
4157 /* Replace the scop in the "input" file by equivalent code
4158 * that uses the GPU. "scop" is assumed to correspond to this scop.
4160 * We first compute a schedule that respects the dependences
4161 * of the original program and select the outermost band
4162 * of tilable dimensions that has at least one parallel loop.
4163 * We then have three blocks of dimensions
4165 * H B G
4167 * The tilable band "B" is first tiled according to "tile.sizes", resulting
4168 * in
4170 * H T P G
4172 * For each iteration of the T loop and for each array, we compute
4173 * the array elements accessed by that iteration, construct a rectangular
4174 * box around it and shift it to the origin. The result is used
4175 * as shared memory for the array.
4177 * We then split off at most 2 parallel loops from the T loops and
4178 * at most 3 parallel loops from the P loops
4180 * H T1 T2 P1 P2 G
4182 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
4183 * according to "grid.sizes"/"block.sizes".
4185 * H T1T T1P T2 P1T P1P P2 G
4187 * Finally, the T1P and P1P iterators are equated to the block and
4188 * thread dimensions respectively and so are effectively removed.
4189 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
4190 * are run on the GPU.
4192 * Code is generated in three stages. We first generate code for the
4193 * host (the H loops), with iterators h%d. Then, for each leaf node
4194 * of the resulting AST, we generate code for the shared loops (up to
4195 * and including T2), with iterators g%d and after equating the H loops
4196 * to h%d parameters and the T1P loops to the block dimensions.
4197 * Finally, we generate code for the remaining loops in a similar fashion.
4199 * The function frees "scop" and "ctx".
4201 int cuda_pet(isl_ctx *ctx, struct pet_scop *scop, struct ppcg_options *options,
4202 const char *input)
4204 isl_union_map *sched;
4205 struct cuda_gen gen;
4207 scop = pet_scop_align_params(scop);
4209 gen.ctx = ctx;
4210 gen.context = isl_set_copy(scop->context);
4211 gen.context = add_context_from_str(gen.context, options->ctx);
4212 gen.n_stmts = scop->n_stmt;
4213 gen.stmts = extract_stmts(ctx, scop, gen.context);
4214 gen.read = pet_scop_collect_reads(scop);
4215 gen.write = pet_scop_collect_writes(scop);
4216 gen.options = options;
4217 gen.state = cloog_isl_state_malloc(gen.ctx);
4219 cuda_open_files(&gen.cuda, input);
4221 collect_array_info(&gen);
4223 sched = pet_scop_collect_schedule(scop);
4225 compute_schedule(&gen, sched);
4227 print_host_code(&gen);
4229 cloog_state_free(gen.state);
4230 clear_cuda_gen(&gen);
4232 cuda_close_files(&gen.cuda);
4234 return 0;