/*
 * Copyright 2010-2011 INRIA Saclay
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * 91893 Orsay, France
 */

#include <isl/polynomial.h>
#include <isl/union_set.h>
#include <isl/schedule.h>
#include <isl/options.h>
#include <cloog/isl/cloog.h>

#include "cuda_common.h"
#include "ppcg_options.h"
/* The fields stride, shift and shift_map only contain valid information
 * if shift != NULL.
 * If so, they express that the current index is such that if you add shift,
 * then the result is always a multiple of stride.
 * shift_map contains the mapping
 *
 *	i -> (i + shift)/stride
 */
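/* A hypothetical illustration (not from the original code): if every
 * accessed value of an index i is of the form i = 4 alpha + 1, then
 * stride = 4 and shift = 3, since i + 3 is then always a multiple of 4,
 * and shift_map would contain the mapping
 *
 *	i -> (i + 3)/4
 */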
struct cuda_array_bound {
	isl_int size;
	isl_aff *lb;

	isl_int stride;
	isl_qpolynomial *shift;
	isl_basic_map *shift_map;
};

struct cuda_array_info;
/* A group of array references in a kernel that should be handled together.
 * If private_bound is not NULL, then it is mapped to registers.
 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
 * Otherwise, it is accessed from global memory.
 */
struct cuda_array_ref_group {
	/* The references in this group access this array. */
	struct cuda_array_info *array;
	/* Position of this group in the list of reference groups of array. */
	int nr;

	/* The following fields are used during the construction of the groups.
	 * access is the combined access relation relative to the shared
	 * memory tiling.
	 * write is set if any access in the group is a write.
	 */
	isl_map *access;
	int write;

	/* For each index, size and offset of piece in shared memory. */
	struct cuda_array_bound *shared_bound;

	/* For each index, size and offset of piece in private memory. */
	struct cuda_array_bound *private_bound;

	/* References in this group; point to elements of a linked list. */
	int n_ref;
	struct cuda_stmt_access **refs;
};

struct cuda_array_info {
	isl_space *dim;
	/* Element type. */
	char *type;
	/* Name of the array. */
	char *name;
	/* Number of indices. */
	unsigned n_index;
	/* For each index, a bound on the array in that direction. */
	isl_pw_aff **bound;
	/* For each index, bound[i] specialized to the current kernel. */
	isl_pw_aff **local_bound;

	/* All references to this array; point to elements of a linked list. */
	int n_ref;
	struct cuda_stmt_access **refs;

	/* The reference groups associated to this array. */
	int n_group;
	struct cuda_array_ref_group **groups;

	/* Last shared memory tile dimension that affects tile of this array. */
	int last_shared;
	/* Dimension at which copying to/from shared memory is printed.
	 * if >= 0, then the value is >= last_shared
	 * if -1, then the copying is done at the leaf level.
	 */
	int print_shared_level;
};
/* Print the name of the local copy of a given group of array references.
 */
static void print_array_name(FILE *out, struct cuda_array_ref_group *group)
{
	int global = 0;

	if (group->private_bound)
		fprintf(out, "private_");
	else if (group->shared_bound)
		fprintf(out, "shared_");
	else
		global = 1;
	fprintf(out, "%s", group->array->name);
	if (!global && group->array->n_group > 1)
		fprintf(out, "_%d", group->nr);
}
/* Collect all references to the given array and store pointers to them
 * in array->refs.
 */
static void collect_references(struct cuda_gen *gen,
	struct cuda_array_info *array)
{
	int i;
	int n;

	n = 0;
	for (i = 0; i < gen->n_stmts; ++i) {
		struct cuda_stmt *stmt = &gen->stmts[i];
		struct cuda_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
							isl_dim_out);
			if (name && !strcmp(array->name, name))
				n++;
		}
	}

	array->n_ref = n;
	array->refs = isl_alloc_array(gen->ctx, struct cuda_stmt_access *, n);
	assert(array->refs);

	n = 0;
	for (i = 0; i < gen->n_stmts; ++i) {
		struct cuda_stmt *stmt = &gen->stmts[i];
		struct cuda_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
							isl_dim_out);
			if (!name || strcmp(array->name, name))
				continue;

			array->refs[n++] = access;
		}
	}
}
static struct cuda_array_bound *create_bound_list(isl_ctx *ctx, int n_index)
{
	int i;
	struct cuda_array_bound *bound;

	bound = isl_alloc_array(ctx, struct cuda_array_bound, n_index);
	assert(bound);

	for (i = 0; i < n_index; ++i) {
		isl_int_init(bound[i].size);
		bound[i].lb = NULL;
		isl_int_init(bound[i].stride);
		bound[i].shift = NULL;
		bound[i].shift_map = NULL;
	}

	return bound;
}

static void free_bound_list(struct cuda_array_bound *bound, int n_index)
{
	int j;

	if (!bound)
		return;

	for (j = 0; j < n_index; ++j) {
		isl_int_clear(bound[j].size);
		isl_int_clear(bound[j].stride);
		isl_aff_free(bound[j].lb);
		isl_qpolynomial_free(bound[j].shift);
		isl_basic_map_free(bound[j].shift_map);
	}
	free(bound);
}
static struct pet_array *find_array(struct pet_scop *scop,
	__isl_keep isl_set *accessed)
{
	int i;
	isl_id *id;

	id = isl_set_get_tuple_id(accessed);

	for (i = 0; i < scop->n_array; ++i) {
		isl_id *id_i;

		id_i = isl_set_get_tuple_id(scop->arrays[i]->extent);
		isl_id_free(id_i);
		if (id == id_i)
			break;
	}
	isl_id_free(id);

	return i < scop->n_array ? scop->arrays[i] : NULL;
}
/* Compute bounds on the host arrays based on the accessed elements
 * and collect all references to the array.
 */
static int extract_array_info(__isl_take isl_set *array, void *user)
{
	int i;
	struct cuda_gen *gen = (struct cuda_gen *)user;
	const char *name;
	int n_index;
	isl_pw_aff **bounds;
	isl_pw_aff **local_bounds;
	struct pet_array *pa;

	n_index = isl_set_dim(array, isl_dim_set);
	name = isl_set_get_tuple_name(array);
	bounds = isl_alloc_array(isl_set_get_ctx(array),
				isl_pw_aff *, n_index);
	assert(bounds);
	local_bounds = isl_calloc_array(isl_set_get_ctx(array),
				isl_pw_aff *, n_index);
	assert(local_bounds);
	gen->array[gen->n_array].dim = isl_set_get_space(array);
	gen->array[gen->n_array].name = strdup(name);
	gen->array[gen->n_array].n_index = n_index;
	gen->array[gen->n_array].bound = bounds;
	gen->array[gen->n_array].local_bound = local_bounds;

	pa = find_array(gen->scop, array);
	assert(pa);

	gen->array[gen->n_array].type = strdup(pa->element_type);

	for (i = 0; i < n_index; ++i) {
		isl_set *dom;
		isl_local_space *ls;
		isl_aff *one;
		isl_pw_aff *bound;
		isl_set *size = i == 0 ? array : pa->extent;

		bound = isl_set_dim_max(isl_set_copy(size), i);
		assert(bound);
		dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
		ls = isl_local_space_from_space(isl_set_get_space(dom));
		one = isl_aff_zero_on_domain(ls);
		one = isl_aff_add_constant_si(one, 1);
		bound = isl_pw_aff_add(bound, isl_pw_aff_alloc(dom, one));
		bound = isl_pw_aff_gist(bound, isl_set_copy(gen->context));

		bounds[i] = bound;
	}

	collect_references(gen, &gen->array[gen->n_array]);

	gen->n_array++;

	isl_set_free(array);
	return 0;
}
void collect_array_info(struct cuda_gen *gen)
{
	isl_union_set *arrays;

	arrays = isl_union_map_range(isl_union_map_copy(gen->read));
	arrays = isl_union_set_union(arrays,
		isl_union_map_range(isl_union_map_copy(gen->write)));
	arrays = isl_union_set_coalesce(arrays);

	gen->n_array = isl_union_set_n_set(arrays);
	gen->array = isl_alloc_array(gen->ctx,
				struct cuda_array_info, gen->n_array);
	assert(gen->array);
	gen->n_array = 0;
	isl_union_set_foreach_set(arrays, &extract_array_info, gen);
	isl_union_set_free(arrays);
}
static void free_array_info(struct cuda_gen *gen)
{
	int i, j;

	for (i = 0; i < gen->n_array; ++i) {
		int n_index = gen->array[i].n_index;
		free(gen->array[i].type);
		free(gen->array[i].name);
		for (j = 0; j < n_index; ++j) {
			isl_pw_aff_free(gen->array[i].bound[j]);
			isl_pw_aff_free(gen->array[i].local_bound[j]);
		}
		isl_space_free(gen->array[i].dim);
		free(gen->array[i].bound);
		free(gen->array[i].local_bound);
		free(gen->array[i].refs);
	}
	free(gen->array);
}
static void declare_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i)
		fprintf(gen->cuda.host_c, "%s *dev_%s;\n",
			gen->array[i].type, gen->array[i].name);
	fprintf(gen->cuda.host_c, "\n");
}

static void print_array_size(struct cuda_gen *gen, FILE *out,
	struct cuda_array_info *array)
{
	int i;
	isl_printer *prn;

	prn = isl_printer_to_file(gen->ctx, out);
	prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
	for (i = 0; i < array->n_index; ++i) {
		prn = isl_printer_print_str(prn, "(");
		prn = isl_printer_print_pw_aff(prn, array->bound[i]);
		prn = isl_printer_print_str(prn, ") * ");
	}
	prn = isl_printer_print_str(prn, "sizeof(");
	prn = isl_printer_print_str(prn, array->type);
	prn = isl_printer_print_str(prn, ")");
	isl_printer_free(prn);
}
static void allocate_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		fprintf(gen->cuda.host_c,
			"cudaCheckReturn(cudaMalloc((void **) &dev_%s, ",
			gen->array[i].name);
		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, "));\n");
	}
	fprintf(gen->cuda.host_c, "\n");
}

static void free_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i)
		fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaFree(dev_%s));\n",
			gen->array[i].name);
}
/* Check if a cuda array is a scalar. A scalar is a value that is not stored
 * as an array or through a pointer reference, but as a single data element.
 * At the moment, scalars are represented as zero-dimensional arrays.
 */
static int cuda_array_is_scalar(struct cuda_array_info *array)
{
	return (array->n_index == 0);
}
static void copy_arrays_to_device(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		isl_space *dim;
		isl_set *read_i;
		int empty;

		dim = isl_space_copy(gen->array[i].dim);
		read_i = isl_union_set_extract_set(gen->copy_in, dim);
		empty = isl_set_fast_is_empty(read_i);
		isl_set_free(read_i);
		if (empty)
			continue;

		fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaMemcpy(dev_%s,",
			gen->array[i].name);

		if (cuda_array_is_scalar(&(gen->array[i])))
			fprintf(gen->cuda.host_c, " &%s, ",
				gen->array[i].name);
		else
			fprintf(gen->cuda.host_c, " %s, ", gen->array[i].name);

		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, ", cudaMemcpyHostToDevice));\n");
	}
	fprintf(gen->cuda.host_c, "\n");
}

static void copy_arrays_from_device(struct cuda_gen *gen)
{
	int i;
	isl_union_set *write;
	write = isl_union_map_range(isl_union_map_copy(gen->write));

	for (i = 0; i < gen->n_array; ++i) {
		isl_space *dim;
		isl_set *write_i;
		int empty;

		dim = isl_space_copy(gen->array[i].dim);
		write_i = isl_union_set_extract_set(write, dim);
		empty = isl_set_fast_is_empty(write_i);
		isl_set_free(write_i);
		if (empty)
			continue;

		fprintf(gen->cuda.host_c,
			"cudaCheckReturn(cudaMemcpy(%s, dev_%s, ",
			gen->array[i].name, gen->array[i].name);
		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, ", cudaMemcpyDeviceToHost));\n");
	}

	isl_union_set_free(write);
	fprintf(gen->cuda.host_c, "\n");
}
static void read_sizes_from_file(struct cuda_gen *gen, const char *filename,
	int *sizes, int len)
{
	int i;
	FILE *file;

	file = fopen(filename, "r");
	if (!file)
		return;

	for (i = 0; i < len; ++i)
		if (fscanf(file, "%d", &sizes[i]) < 1)
			break;

	fclose(file);
}

static void reverse_list(int *list, int len)
{
	int i;
	int t;

	for (i = 0; 2 * i < len; ++i) {
		t = list[i];
		list[i] = list[len - 1 - i];
		list[len - 1 - i] = t;
	}
}
/* Read user specified sizes from "tile.sizes", "block.sizes" and "grid.sizes"
 * after filling in some potentially useful defaults.
 */
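/* As an illustration (hypothetical file contents, not part of ppcg):
 * a "tile.sizes" file containing the single line
 *
 *	32 16
 *
 * would replace the first two default tile sizes
 * (gen->options->tile_size) by 32 and 16, since read_sizes_from_file()
 * above simply reads whitespace-separated integers with fscanf().
 */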
static void read_sizes(struct cuda_gen *gen)
{
	int n;

	gen->tile_size = isl_alloc_array(gen->ctx, int, gen->tile_len);
	assert(gen->tile_size);
	for (n = 0; n < gen->tile_len; ++n)
		gen->tile_size[n] = gen->options->tile_size;
	read_sizes_from_file(gen, "tile.sizes", gen->tile_size, gen->tile_len);

	n = gen->n_parallel;
	gen->n_block = (n <= 3) ? n : 3;
	switch (gen->n_block) {
	case 1:
		gen->block_dim[0] = 512;
		break;
	case 2:
		gen->block_dim[0] = 32;
		gen->block_dim[1] = 16;
		break;
	default:
		gen->block_dim[0] = 32;
		gen->block_dim[1] = 4;
		gen->block_dim[2] = 4;
		break;
	}
	read_sizes_from_file(gen, "block.sizes", gen->block_dim, gen->n_block);
	reverse_list(gen->block_dim, gen->n_block);

	gen->n_grid = (n <= 2) ? n : 2;
	switch (gen->n_grid) {
	case 1:
		gen->grid_dim[0] = 32768;
		break;
	default:
		gen->grid_dim[0] = 256;
		gen->grid_dim[1] = 256;
		break;
	}
	read_sizes_from_file(gen, "grid.sizes", gen->grid_dim, gen->n_grid);
	reverse_list(gen->grid_dim, gen->n_grid);
}
static void free_stmts(struct cuda_stmt *stmts, int n)
{
	int i;

	for (i = 0; i < n; ++i) {
		struct cuda_stmt_access *access, *next;

		for (access = stmts[i].accesses; access; access = next) {
			next = access->next;
			isl_map_free(access->access);
			free(access);
		}
		isl_set_free(stmts[i].domain);
	}
	free(stmts);
}

void clear_cuda_gen(struct cuda_gen *gen)
{
	free_stmts(gen->stmts, gen->n_stmts);
	free_array_info(gen);
	isl_set_free(gen->context);
	isl_union_set_free(gen->copy_in);
	isl_union_map_free(gen->sched);
	isl_union_map_free(gen->read);
	isl_union_map_free(gen->write);
}
static void print_reverse_list(FILE *out, int len, int *list)
{
	int i;

	for (i = 0; i < len; ++i) {
		if (i)
			fprintf(out, ", ");
		fprintf(out, "%d", list[len - 1 - i]);
	}
}
static void print_kernel_launch(struct cuda_gen *gen,
	__isl_keep isl_union_set *arrays)
{
	int i;
	int first = 1;
	unsigned nparam;
	isl_space *dim;

	print_indent(gen->code.dst, gen->code.indent);
	fprintf(gen->code.dst, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
		gen->kernel_id, gen->kernel_id, gen->kernel_id);
	fprintf(gen->cuda.kernel_c, "__global__ void kernel%d(",
		gen->kernel_id);
	fprintf(gen->cuda.kernel_h, "__global__ void kernel%d(",
		gen->kernel_id);

	for (i = 0; i < gen->n_array; ++i) {
		isl_set *arr;
		int empty;

		dim = isl_space_copy(gen->array[i].dim);
		arr = isl_union_set_extract_set(arrays, dim);
		empty = isl_set_fast_is_empty(arr);
		isl_set_free(arr);
		if (empty)
			continue;

		if (!first) {
			fprintf(gen->code.dst, ", ");
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_h, ", ");
		}

		fprintf(gen->code.dst, "dev_%s", gen->array[i].name);
		fprintf(gen->cuda.kernel_c, "%s *%s",
			gen->array[i].type, gen->array[i].name);
		fprintf(gen->cuda.kernel_h, "%s *%s",
			gen->array[i].type, gen->array[i].name);

		first = 0;
	}

	dim = isl_union_set_get_space(arrays);
	nparam = isl_space_dim(dim, isl_dim_param);
	for (i = 0; i < nparam; ++i) {
		const char *name = isl_space_get_dim_name(dim, isl_dim_param, i);

		if (!first) {
			fprintf(gen->code.dst, ", ");
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_h, ", ");
		}
		fprintf(gen->code.dst, "%s", name);
		fprintf(gen->cuda.kernel_c, "int %s", name);
		fprintf(gen->cuda.kernel_h, "int %s", name);

		first = 0;
	}
	isl_space_free(dim);

	for (i = 0; i < gen->tile_first; ++i) {
		if (!first) {
			fprintf(gen->code.dst, ", ");
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_h, ", ");
		}
		fprintf(gen->code.dst, "h%d", i);
		fprintf(gen->cuda.kernel_c, "int h%d", i);
		fprintf(gen->cuda.kernel_h, "int h%d", i);

		first = 0;
	}

	fprintf(gen->code.dst, ");\n");
	fprintf(gen->cuda.kernel_c, ")\n");
	fprintf(gen->cuda.kernel_h, ");\n");

	fprintf(gen->code.dst, "cudaCheckKernel();\n");
}
/* Construct a map from a domain of dimensionality "len"
 * to a domain of dimensionality "len" + "tile_len" that tiles
 * the "tile_len" coordinates starting at "first".
 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
 * "dim" prescribes the parameters.
 */
static __isl_give isl_map *tile(__isl_take isl_space *dim, int len,
	int first, int tile_len, int *tile_size)
{
	int i;
	isl_int v;
	isl_basic_map *bmap;
	isl_constraint *c;
	isl_local_space *ls;

	isl_int_init(v);

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len + tile_len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len - tile_len; ++i) {
		int j = i < first ? i : i + tile_len;
		int k = i < first ? i : i + 2 * tile_len;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_in, j, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out, k, v);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	for (i = 0; i < tile_len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_in, first + i, v);
		isl_int_set_si(v, tile_size[i]);
		isl_constraint_set_coefficient(c, isl_dim_out, first + i, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out,
						first + i + tile_len, v);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out,
						first + i + tile_len, v);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_out,
						first + i + tile_len, v);
		isl_int_set_si(v, tile_size[i] - 1);
		isl_constraint_set_constant(c, v);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	isl_local_space_free(ls);
	isl_int_clear(v);

	return isl_map_from_basic_map(bmap);
}
/* Construct a map from a domain of dimensionality "len"
 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
 * that are projected out at the end.
 * "dim" prescribes the parameters.
 */
static __isl_give isl_map *wrap(__isl_take isl_space *dim, int len,
	int first, int wrap_len, int *wrap_size)
{
	int i;
	isl_basic_map *bmap;
	isl_constraint *c;
	isl_local_space *ls;

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len + 2 * wrap_len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		int k = i < first + wrap_len ? i : i + 2 * wrap_len;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	for (i = 0; i < wrap_len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + wrap_len + i, 1);
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + 2 * wrap_len + i, wrap_size[i]);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + wrap_len + i, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + wrap_len + i, -1);
		isl_constraint_set_constant_si(c, wrap_size[i] - 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	isl_local_space_free(ls);

	bmap = isl_basic_map_project_out(bmap, isl_dim_out,
					first + 2 * wrap_len, wrap_len);

	return isl_map_from_basic_map(bmap);
}
/* Add "n" parameters named prefix%d.
 */
static __isl_give isl_set *add_params(__isl_take isl_set *set,
	int n, const char *prefix)
{
	int i;
	unsigned nparam;
	char name[20];

	nparam = isl_set_dim(set, isl_dim_param);
	set = isl_set_add_dims(set, isl_dim_param, n);

	for (i = 0; i < n; ++i) {
		snprintf(name, sizeof(name), "%s%d", prefix, i);
		set = isl_set_set_dim_name(set, isl_dim_param,
						nparam + i, name);
	}

	return set;
}
/* Equate the "n" dimensions of "set" starting at "first" to
 * freshly created parameters named prefix%d.
 */
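/* For example (illustration only): on a 4-dimensional set
 * { [s0, s1, s2, s3] }, parametrize(set, 2, 2, "g") adds parameters
 * g0 and g1 together with the constraints
 *
 *	s2 = g0 and s3 = g1
 */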
static __isl_give isl_set *parametrize(__isl_take isl_set *set,
	int first, int n, const char *prefix)
{
	int i;
	unsigned nparam;
	isl_int v;
	isl_space *dim;
	isl_basic_set *bset;
	isl_constraint *c;
	isl_local_space *ls;

	nparam = isl_set_dim(set, isl_dim_param);

	set = add_params(set, n, prefix);

	dim = isl_set_get_space(set);
	bset = isl_basic_set_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	isl_int_init(v);

	for (i = 0; i < n; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_set, first + i, v);
		bset = isl_basic_set_add_constraint(bset, c);
	}

	isl_int_clear(v);
	isl_local_space_free(ls);

	return isl_set_intersect(set, isl_set_from_basic_set(bset));
}

static __isl_give isl_set *parametrization(__isl_take isl_space *dim,
	int len, int first, int n, const char *prefix)
{
	isl_set *set;

	dim = isl_space_add_dims(dim, isl_dim_set, len);
	set = isl_set_universe(dim);

	return parametrize(set, first, n, prefix);
}
/* Tile the B loops over the tile sizes and then tile/wrap
 * the T1 loops over the blocks.
 */
static __isl_give isl_union_map *tile_schedule(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_space *dim;
	isl_map *tiling, *block_tiling;

	dim = isl_union_map_get_space(sched);
	tiling = tile(isl_space_copy(dim), gen->untiled_len,
			gen->tile_first, gen->tile_len, gen->tile_size);

	if (gen->options->wrap)
		block_tiling = wrap(dim, gen->untiled_len + gen->tile_len,
				gen->tile_first, gen->n_grid, gen->grid_dim);
	else
		block_tiling = tile(dim, gen->untiled_len + gen->tile_len,
				gen->tile_first, gen->n_grid, gen->grid_dim);

	gen->tiled_len = gen->untiled_len + gen->tile_len + gen->n_grid;

	tiling = isl_map_apply_range(tiling, block_tiling);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(tiling));

	gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;

	return sched;
}
static __isl_give isl_union_map *parametrize_tiled_schedule(
	struct cuda_gen *gen, __isl_take isl_union_map *sched)
{
	isl_space *dim;
	isl_set *par;

	dim = isl_union_map_get_space(sched);
	par = parametrization(dim, gen->tiled_len, 0, gen->tile_first, "h");
	sched = isl_union_map_intersect_range(sched,
		isl_union_set_from_set(par));

	dim = isl_union_map_get_space(sched);
	par = parametrization(dim, gen->tiled_len,
		gen->tile_first + gen->n_grid, gen->n_grid, "b");
	sched = isl_union_map_intersect_range(sched,
		isl_union_set_from_set(par));

	return sched;
}
/* Tile/wrap the P1 loops over the threads.
 */
static __isl_give isl_union_map *thread_tile_schedule(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_space *dim;
	isl_map *tiling;
	isl_set *par;

	dim = isl_union_map_get_space(sched);

	if (gen->options->wrap)
		tiling = wrap(isl_space_copy(dim), gen->tiled_len,
				gen->shared_len, gen->n_block, gen->block_dim);
	else
		tiling = tile(isl_space_copy(dim), gen->tiled_len,
				gen->shared_len, gen->n_block, gen->block_dim);
	gen->thread_tiled_len = gen->tiled_len + gen->n_block;

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(tiling));

	par = parametrization(dim, gen->thread_tiled_len,
		gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
		gen->n_block, "t");
	sched = isl_union_map_intersect_range(sched,
		isl_union_set_from_set(par));

	gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;

	return sched;
}
/* If the user asked for it, scale the shared memory tile loops
 * (T1P and T2) of "sched" by gen->tile_size[i].
 * If we are not performing "wrapping", then additionally scale the T1P
 * loops by gen->grid_dim[i].
 */
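/* E.g. (hypothetical sizes): with tile_size[i] = 16, grid_dim[i] = 32
 * and wrapping disabled, a T1P iterator is scaled by f = 16 * 32 = 512,
 * which is the coefficient f in the equality f * in - out = 0 below.
 */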
static __isl_give isl_union_map *scale_tile_loops(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i;
	isl_space *dim;
	isl_basic_map *scale;
	isl_constraint *c;
	isl_local_space *ls;

	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_space(sched);
	dim = isl_space_add_dims(dim, isl_dim_in, gen->tiled_len);
	dim = isl_space_add_dims(dim, isl_dim_out, gen->tiled_len);
	scale = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < gen->tiled_len; ++i) {
		int f = 1;

		if (i >= gen->tile_first && i < gen->tile_first + gen->n_grid) {
			f = gen->tile_size[i - gen->tile_first];
			if (!gen->options->wrap)
				f *= gen->grid_dim[i - gen->tile_first];
		} else if (i >= gen->tile_first + gen->n_grid &&
			   i < gen->tile_first + gen->n_grid + gen->tile_len) {
			f = gen->tile_size[i - (gen->tile_first + gen->n_grid)];
		}

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_local_space_free(ls);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}
/* If we are not performing "wrapping" and if the user asked for it,
 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
 */
static __isl_give isl_union_map *scale_thread_tile_loops(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i;
	isl_space *dim;
	isl_basic_map *scale;
	isl_constraint *c;
	isl_local_space *ls;

	if (gen->options->wrap)
		return sched;
	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_space(sched);
	dim = isl_space_add_dims(dim, isl_dim_in, gen->thread_tiled_len);
	dim = isl_space_add_dims(dim, isl_dim_out, gen->thread_tiled_len);
	scale = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < gen->thread_tiled_len; ++i) {
		int f = 1;

		if (i >= gen->shared_len &&
		    i < gen->shared_len + gen->n_block)
			f = gen->block_dim[i - gen->shared_len];

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_local_space_free(ls);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}
/* If we are not performing "wrapping" and if the user asked for it,
 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
 */
static __isl_give isl_union_map *scale_access_tile_loops(struct cuda_gen *gen,
	__isl_take isl_union_map *sched, int len, int first, int n_tile)
{
	int i;
	isl_space *dim;
	isl_basic_map *scale;
	isl_constraint *c;
	isl_local_space *ls;

	if (gen->options->wrap)
		return sched;
	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_space(sched);
	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len);
	scale = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		int f = 1;

		if (i >= first && i < first + n_tile)
			f = gen->block_dim[i - first];

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_local_space_free(ls);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}
/* If print_user_stmt is set, we want to print the statements ourselves,
 * instead of relying on the C preprocessor. If so, we need to use
 * the stop option so that the domains will be saved on the statement
 * nodes.
 */
static void print_cloog_shared_body(struct cuda_gen *gen,
	__isl_keep isl_set *context, __isl_keep isl_union_map *sched, int len,
	void (*print_user_stmt)(struct gpucode_info *info,
				struct clast_user_stmt *s),
	int first_unroll)
{
	int i;
	char name[20];
	CloogOptions *options;
	CloogDomain *cloog_context;
	CloogUnionDomain *ud;
	CloogInput *input;
	struct clast_stmt *stmt;

	sched = isl_union_map_copy(sched);
	sched = isl_union_map_align_params(sched, isl_set_get_space(context));

	options = cloog_options_malloc(gen->state);
	options->language = CLOOG_LANGUAGE_C;
	options->strides = 1;
	options->override = 1;
	options->save_domains = 1;
	options->noscalars = 1;
	options->first_unroll = first_unroll;

	ud = cloog_union_domain_from_isl_union_map(sched);
	for (i = 0; i < len; ++i) {
		snprintf(name, sizeof(name), "c%d", i);
		ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
	}
	cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
	input = cloog_input_alloc(cloog_context, ud);

	stmt = cloog_clast_create_from_input(input, options);

	gen->stmt_code.indent = gen->kernel_code.indent;
	gen->stmt_code.dst = gen->cuda.kernel_c;
	gen->stmt_code.print_user_stmt = print_user_stmt;
	gen->stmt_code.print_user_stmt_list = NULL;
	gen->stmt_code.print_for_head = NULL;
	gen->stmt_code.print_for_foot = NULL;
	gen->stmt_code.user = gen;
	gpu_print_host_stmt(&gen->stmt_code, stmt);

	cloog_clast_free(stmt);
	cloog_options_free(options);
}
/* Add "len" parameters p[i] called prefix%d,
 * with bounds 0 <= p[i] < size[i].
 */
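/* For instance (illustration only): add_bounded_parameters(set, 2,
 * gen->block_dim, "t") adds parameters t0 and t1 with
 *
 *	0 <= t0 < block_dim[0] and 0 <= t1 < block_dim[1]
 *
 * as is done for the thread identifiers in print_shared_body() below.
 */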
__isl_give isl_set *add_bounded_parameters(__isl_take isl_set *set,
	int len, int *size, const char *prefix)
{
	int i;
	unsigned nparam;
	isl_int v;
	isl_space *dim;
	isl_basic_set *bset;
	isl_constraint *c;
	isl_local_space *ls;
	char name[20];

	nparam = isl_set_dim(set, isl_dim_param);
	set = isl_set_add_dims(set, isl_dim_param, len);

	for (i = 0; i < len; ++i) {
		snprintf(name, sizeof(name), "%s%d", prefix, i);
		set = isl_set_set_dim_name(set, isl_dim_param,
						nparam + i, name);
	}

	dim = isl_set_get_space(set);
	bset = isl_basic_set_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	isl_int_init(v);

	for (i = 0; i < len; ++i) {
		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
		bset = isl_basic_set_add_constraint(bset, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
		isl_int_set_si(v, size[i] - 1);
		isl_constraint_set_constant(c, v);
		bset = isl_basic_set_add_constraint(bset, c);
	}

	isl_int_clear(v);
	isl_local_space_free(ls);

	return isl_set_intersect(set, isl_set_from_basic_set(bset));
}
static void print_shared_body(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched,
	int len, void (*print_user_stmt)(struct gpucode_info *info,
					struct clast_user_stmt *s),
	int first_unroll)
{
	isl_set *context;

	context = isl_set_copy(shared_domain);
	context = parametrize(context, 0, gen->shared_len, "g");
	context = isl_set_project_out(context, isl_dim_set, 0, gen->shared_len);
	context = add_bounded_parameters(context,
					gen->n_block, gen->block_dim, "t");

	print_cloog_shared_body(gen, context, sched, len, print_user_stmt,
				first_unroll);

	isl_set_free(context);
}
/* Given a tile of an array, construct a map that maps each element
 * of the tile to a copy of the tile shifted to the origin
 * (based on the lower bounds in group->private_bound or group->shared_bound).
 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
 * is applied to the index first.
 * The domain of the resulting map is "access",
 * while the range space is anonymous.
 */
static __isl_give isl_map *shift_access(__isl_take isl_set *access,
	struct cuda_array_ref_group *group)
{
	int i;
	isl_space *dim;
	isl_basic_set *bset;
	isl_basic_map *bmap;
	isl_aff *lb;
	isl_basic_set *offset;
	isl_basic_map *shift;
	isl_basic_map *pre_shift;
	isl_map *sched;
	const char *name;
	struct cuda_array_bound *bounds;
	int n_index = group->array->n_index;

	bounds = group->private_bound;
	if (!bounds)
		bounds = group->shared_bound;

	dim = isl_set_get_space(access);
	dim = isl_space_drop_dims(dim, isl_dim_set, 0, n_index);
	offset = isl_basic_set_universe(dim);
	for (i = 0; i < n_index; ++i) {
		lb = isl_aff_copy(bounds[i].lb);
		bmap = isl_basic_map_from_aff(lb);
		bset = isl_basic_map_range(bmap);
		offset = isl_basic_set_flat_product(offset, bset);
	}
	offset = isl_basic_set_neg(offset);

	dim = isl_space_map_from_set(isl_set_get_space(access));
	shift = isl_basic_map_identity(dim);
	shift = isl_basic_map_set_tuple_name(shift, isl_dim_out, NULL);

	bset = isl_basic_set_universe(isl_set_get_space(access));
	bmap = isl_basic_map_from_domain_and_range(bset, offset);

	shift = isl_basic_map_sum(shift, bmap);

	dim = isl_set_get_space(access);
	dim = isl_space_drop_dims(dim, isl_dim_set, 0, n_index);
	dim = isl_space_map_from_set(dim);
	pre_shift = isl_basic_map_universe(isl_space_copy(dim));
	dim = isl_space_add_dims(dim, isl_dim_in, 1);
	dim = isl_space_add_dims(dim, isl_dim_out, 1);
	for (i = 0; i < n_index; ++i) {
		if (!bounds[i].shift_map)
			bmap = isl_basic_map_identity(isl_space_copy(dim));
		else
			bmap = isl_basic_map_copy(bounds[i].shift_map);
		pre_shift = isl_basic_map_flat_product(pre_shift, bmap);
	}
	isl_space_free(dim);
	name = isl_basic_map_get_tuple_name(shift, isl_dim_in);
	pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_in, name);
	pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_out, name);
	shift = isl_basic_map_apply_range(pre_shift, shift);

	sched = isl_map_from_basic_map(shift);
	sched = isl_map_intersect_domain(sched, access);

	return sched;
}
/* Construct a schedule for iterating over all elements in the given
 * piece of an array. The schedule iterates over a copy of the piece
 * that is shifted to the origin.
 * We subsequently also perform the tiling/wrapping over the threads.
 *
 * In particular, we tile the final iterators so that the final thread
 * dimension runs over the final array dimension.
 * However, if those final iterators have only a single iteration,
 * we try to tile earlier iterators instead.
 */
static __isl_give isl_union_map *access_schedule(struct cuda_gen *gen,
	__isl_take isl_set *access, struct cuda_array_ref_group *group)
{
	int i;
	isl_space *dim;
	isl_map *sched;
	isl_union_map *usched;
	isl_map *tiling;
	isl_set *par;
	unsigned nvar = isl_set_dim(access, isl_dim_set);
	int n_tile;
	int first;

	sched = shift_access(access, group);

	n_tile = gen->n_block;
	if (n_tile > nvar) {
		sched = isl_map_insert_dims(sched,
					isl_dim_out, 0, n_tile - nvar);
		for (i = 0; i < n_tile - nvar; ++i)
			sched = isl_map_fix_si(sched, isl_dim_out, i, 0);
		nvar = n_tile;
	}

	first = nvar - n_tile;

	for (; first > 0; first--)
		if (!isl_map_plain_is_fixed(sched, isl_dim_out,
						first + n_tile - 1, NULL))
			break;

	dim = isl_map_get_space(sched);
	dim = isl_space_params(dim);
	if (gen->options->wrap)
		tiling = wrap(isl_space_copy(dim), nvar, first,
				n_tile, gen->block_dim);
	else
		tiling = tile(isl_space_copy(dim), nvar, first,
				n_tile, gen->block_dim);
	sched = isl_map_apply_range(sched, tiling);

	par = parametrization(dim, nvar + n_tile, first + n_tile, n_tile, "t");
	usched = isl_union_map_from_map(sched);
	usched = isl_union_map_intersect_range(usched,
		isl_union_set_from_set(par));

	usched = scale_access_tile_loops(gen, usched, nvar + n_tile,
						first, n_tile);

	return usched;
}
static void print_shared_access(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_take isl_set *access,
	const char *type, struct cuda_array_ref_group *group)
{
	const char *array_name;
	char *name;
	isl_ctx *ctx;
	isl_union_map *sched;
	unsigned nvar = isl_set_dim(access, isl_dim_set);
	int n_tile;

	ctx = isl_set_get_ctx(access);
	array_name = isl_set_get_tuple_name(access);
	name = isl_alloc_array(ctx, char,
		strlen(type) + sizeof("_shared_") + strlen(array_name) + 20);
	if (group->array->n_group > 1)
		sprintf(name, "%s_shared_%s_%d", type, array_name, group->nr);
	else
		sprintf(name, "%s_shared_%s", type, array_name);
	access = isl_set_set_tuple_name(access, name);
	free(name);

	sched = access_schedule(gen, access, group);

	n_tile = gen->n_block;
	if (n_tile > nvar)
		nvar = n_tile;

	print_shared_body(gen, shared_domain, sched, nvar + n_tile, NULL, -1);

	isl_union_map_free(sched);
}
/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
static __isl_give isl_union_map *group_access_relation(
	struct cuda_array_ref_group *group, int read, int write)
{
	int i;
	isl_union_map *access;

	access = isl_union_map_empty(isl_map_get_space(group->access));
	for (i = 0; i < group->n_ref; ++i) {
		isl_map *map_i;

		if (!((read && group->refs[i]->read) ||
		      (write && group->refs[i]->write)))
			continue;
		map_i = isl_map_copy(group->refs[i]->access);
		access = isl_union_map_union(access,
			isl_union_map_from_map(map_i));
	}

	return access;
}

/* Check that none of the shared memory tiles involve any strides.
 */
static int no_strides(struct cuda_array_ref_group *group)
{
	int i;
	int n_index = group->array->n_index;

	for (i = 0; i < n_index; ++i)
		if (group->shared_bound[i].shift)
			return 0;

	return 1;
}
/* Return a set containing the values of the given index i
 * of the elements in the array tile in global memory that corresponds
 * to the shared memory copy.
 * In particular, if a is the index, we return a set with constraints
 *
 *	tile_offset <= a <= tile_offset + tile_size - 1
 *
 * intersected with
 *
 *	0 <= a <= array_size - 1
 */
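/* A numeric illustration (hypothetical values): for a tile of size 32
 * with lower bound 64 in an array of size 80, the two constraint sets are
 *
 *	64 <= a <= 95	and	0 <= a <= 79
 *
 * whose intersection is 64 <= a <= 79.
 */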
static __isl_give isl_set *group_tile_dim(struct cuda_array_ref_group *group,
	int i)
{
	isl_basic_set *tile;
	isl_aff *aff;
	isl_constraint *c;
	isl_local_space *ls;
	isl_pw_aff *bound;
	isl_set *dom;
	isl_set *tile_set;

	aff = isl_aff_copy(group->shared_bound[i].lb);
	aff = isl_aff_add_dims(aff, isl_dim_in, 1);
	ls = isl_aff_get_domain_local_space(aff);
	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	c = isl_inequality_from_aff(isl_aff_copy(aff));
	tile = isl_basic_set_from_constraint(c);

	aff = isl_aff_neg(aff);
	aff = isl_aff_add_constant(aff, group->shared_bound[i].size);
	aff = isl_aff_add_constant_si(aff, -1);
	c = isl_inequality_from_aff(aff);
	tile = isl_basic_set_add_constraint(tile, c);

	aff = isl_aff_zero_on_domain(ls);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	c = isl_inequality_from_aff(aff);
	tile = isl_basic_set_add_constraint(tile, c);

	bound = isl_pw_aff_copy(group->array->bound[i]);
	bound = isl_pw_aff_add_dims(bound, isl_dim_in, 1);
	ls = isl_local_space_from_space(isl_pw_aff_get_domain_space(bound));
	aff = isl_aff_zero_on_domain(ls);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	aff = isl_aff_add_constant_si(aff, 1);
	dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));

	tile_set = isl_pw_aff_ge_set(bound, isl_pw_aff_alloc(dom, aff));
	tile_set = isl_set_align_params(tile_set, isl_basic_set_get_space(tile));
	tile_set = isl_set_intersect(tile_set, isl_set_from_basic_set(tile));

	return tile_set;
}

/* Return a set containing the elements in the array tile in
 * global memory that corresponds to the shared memory copy.
 */
static __isl_give isl_set *group_tile(struct cuda_array_ref_group *group)
{
	int i;
	int n_index = group->array->n_index;
	isl_set *tile;

	tile = group_tile_dim(group, 0);
	for (i = 1; i < n_index; ++i) {
		isl_set *tile_i;

		tile_i = group_tile_dim(group, i);
		tile = isl_set_flat_product(tile, tile_i);
	}

	tile = isl_set_set_tuple_name(tile, group->array->name);

	return tile;
}
/* Print code for reading into or writing from shared memory
 * the given array reference group.
 *
 * sched maps the original iteration domains to the shared memory tile loops.
 *
 * If we are performing a read from global memory to shared memory,
 * if the array involved is not a scalar and if the definition of the
 * shared memory tiles does not involve any strides, then we copy
 * the entire tile to shared memory. This may result in some extra
 * elements getting copied, but it should lead to simpler code
 * (which means that fewer registers may be needed) and less divergence.
 *
 * Otherwise, we only copy the elements that will be read or have been written
 * in the kernel.
 *
 * Note that the absence of stride requirement can easily be lifted.
 * We would just need to add constraints of the form
 *
 *	shift + a = stride * alpha
 */
static int print_group_shared_accesses(struct cuda_gen *gen,
	struct cuda_array_ref_group *group, const char *type,
	__isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched)
{
	int read;
	isl_union_map *access;
	isl_union_set *uset;
	isl_set *access_set;

	if (group->private_bound)
		return 0;
	if (!group->shared_bound)
		return 0;

	read = !strcmp(type, "read");

	access = group_access_relation(group, read, !read);
	access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
	uset = isl_union_map_range(access);

	if (isl_union_set_is_empty(uset)) {
		isl_union_set_free(uset);
		return 0;
	}

	if (read && group->array->n_index > 0 && no_strides(group)) {
		isl_union_set_free(uset);
		access_set = group_tile(group);
		print_shared_access(gen, shared_domain, access_set,
					type, group);
		return 1;
	}

	access_set = isl_set_from_union_set(uset);
	access_set = isl_set_coalesce(access_set);

	print_shared_access(gen, shared_domain, access_set, type, group);

	return 1;
}
/* Print code for reading into or writing from shared memory at
 * the given level (-1 for innermost).
 *
 * If we are not printing at the innermost level, then the dimensionality
 * of shared_domain may be smaller than gen->shared_len.
 * As the rest of the code assumes that the domain of access has
 * gen->shared_len dimensions, we therefore may need to embed this domain
 * in a higher dimensional space after intersection with shared_domain.
 */
static void print_shared_accesses(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
	const char *type, int level)
{
	int i, j;
	isl_space *dim;
	isl_map *proj;
	isl_set *par;
	int shared_len = isl_set_dim(shared_domain, isl_dim_set);
	int sync = 0;
	isl_union_map *sched;

	shared_domain = isl_set_copy(shared_domain);
	sched = isl_union_map_copy(gen->tiled_sched);
	dim = isl_union_map_get_space(sched);
	proj = projection(dim, gen->tiled_len, shared_len);
	sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
	sched = isl_union_map_intersect_range(sched,
		isl_union_set_from_set(isl_set_copy(shared_domain)));
	if (shared_len != gen->shared_len) {
		dim = isl_union_map_get_space(sched);
		proj = projection(dim, gen->shared_len, shared_len);
		proj = isl_map_reverse(proj);
		shared_domain = isl_set_apply(shared_domain,
						isl_map_copy(proj));
		sched = isl_union_map_apply_range(sched,
						isl_union_map_from_map(proj));
	}

	dim = isl_union_map_get_space(sched);
	par = parametrization(dim, gen->shared_len, 0, gen->shared_len, "g");
	sched = isl_union_map_intersect_range(sched,
		isl_union_set_from_set(par));

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		if (gen->array[i].print_shared_level != level)
			continue;

		for (j = 0; j < array->n_group; ++j) {
			if (print_group_shared_accesses(gen, array->groups[j],
						type, shared_domain, sched))
				sync = 1;
		}
	}

	isl_union_map_free(sched);
	isl_set_free(shared_domain);

	if (sync) {
		print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
		fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
	}
}
/* Given an index expression into a tile of an array, adjust the expression
 * to a shift of the tile to the origin
 * (based on the lower bounds in array->shared_bound).
 * If the index is strided, then we first add
 * bound->shift and divide by bound->stride.
 */
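/* Continuing the hypothetical stride example near the top of this file:
 * with stride = 4, shift = 3 and lower bound lb, an index expression q
 * is rewritten to
 *
 *	(q + 3)/4 - lb
 *
 * by the addition, the multiplication by 1/stride and the subtraction
 * of the lower bound performed below.
 */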
static __isl_give isl_qpolynomial *shift_index(__isl_take isl_qpolynomial *qp,
	struct cuda_array_info *array,
	struct cuda_array_bound *bound, __isl_take isl_set *domain)
{
	isl_qpolynomial *lb;
	isl_int one;
	isl_space *dim;
	isl_qpolynomial *shift, *t;

	if (bound->shift) {
		shift = bound->shift;
		shift = isl_qpolynomial_copy(shift);
		shift = isl_qpolynomial_project_domain_on_params(shift);
		shift = isl_qpolynomial_align_params(shift,
					isl_qpolynomial_get_space(qp));
		qp = isl_qpolynomial_add(qp, shift);
		dim = isl_qpolynomial_get_domain_space(qp);
		isl_int_init(one);
		isl_int_set_si(one, 1);
		t = isl_qpolynomial_rat_cst_on_domain(dim, one, bound->stride);
		isl_int_clear(one);
		qp = isl_qpolynomial_mul(qp, t);
	}

	lb = isl_qpolynomial_from_aff(isl_aff_copy(bound->lb));
	lb = isl_qpolynomial_project_domain_on_params(lb);

	lb = isl_qpolynomial_align_params(lb, isl_qpolynomial_get_space(qp));

	qp = isl_qpolynomial_sub(qp, lb);
	qp = isl_qpolynomial_gist(qp, domain);

	return qp;
}
/* This function is called for each access to an array in some statement
 * in the original code.
 * Replace that access by an access to shared or (linearized) global memory.
 * Since the array in shared memory is just
 * a shifted copy of part of the original array, we simply need
 * to subtract the lower bound, which was computed
 * in can_tile_for_shared_memory.
 * If any of the indices is strided, then we first add
 * shared_bound[i].shift and divide by shared_bound[i].stride.
 *
 * If the given array is accessed directly from global memory,
 * we don't need to perform any shifting and simply simplify
 * the expression in the context of the domain instead.
 *
 * If the array space (range of access) has no name, then we are
 * accessing an iterator in the original program.
 */
static void print_access(struct cuda_gen *gen, __isl_take isl_map *access,
	int group_nr)
{
	int i;
	const char *name;
	unsigned n_index;
	struct cuda_array_info *array = NULL;
	isl_printer *prn;
	isl_basic_set *aff;
	isl_set *data_set;
	isl_set *domain;
	struct cuda_array_bound *bounds = NULL;

	access = isl_map_align_params(access,
					isl_set_get_space(gen->stmt_domain));

	data_set = isl_set_apply(isl_set_copy(gen->stmt_domain), access);

	name = isl_set_get_tuple_name(data_set);

	if (!name)
		fprintf(gen->cuda.kernel_c, "(");
	else {
		struct cuda_array_ref_group *group;

		for (i = 0; i < gen->n_array; ++i) {
			if (strcmp(name, gen->array[i].name))
				continue;
			array = &gen->array[i];
		}
		assert(array);
		group = array->groups[group_nr];
		bounds = group->private_bound;
		if (!bounds)
			bounds = group->shared_bound;

		print_array_name(gen->cuda.kernel_c, group);

		if (cuda_array_is_scalar(array)) {
			isl_set_free(data_set);
			return;
		}

		fprintf(gen->cuda.kernel_c, "[");
	}

	n_index = isl_set_dim(data_set, isl_dim_set);
	aff = isl_set_affine_hull(data_set);

	prn = isl_printer_to_file(gen->ctx, gen->cuda.kernel_c);
	prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);

	if (!bounds)
		for (i = 0; i + 1 < n_index; ++i)
			prn = isl_printer_print_str(prn, "(");

	for (i = 0; i < n_index; ++i) {
		isl_constraint *c;
		isl_qpolynomial *qp;
		int ok;

		ok = isl_basic_set_has_defining_equality(aff,
							isl_dim_out, i, &c);
		assert(ok);
		qp = isl_qpolynomial_from_constraint(c, isl_dim_out, i);
		qp = isl_qpolynomial_project_domain_on_params(qp);

		if (!array) {
			prn = isl_printer_print_qpolynomial(prn, qp);
			isl_qpolynomial_free(qp);
			continue;
		}

		domain = isl_set_copy(gen->stmt_domain);
		domain = isl_set_project_out(domain, isl_dim_set, 0,
					isl_set_dim(domain, isl_dim_set));
		if (!bounds)
			qp = isl_qpolynomial_gist(qp, domain);
		else
			qp = shift_index(qp, array, &bounds[i], domain);

		if (i) {
			if (!bounds) {
				prn = isl_printer_print_str(prn, ") * (");
				prn = isl_printer_print_pw_aff(prn,
							array->local_bound[i]);
				prn = isl_printer_print_str(prn, ") + ");
			} else
				prn = isl_printer_print_str(prn, "][");
		}
		prn = isl_printer_print_qpolynomial(prn, qp);
		isl_qpolynomial_free(qp);
	}

	if (!name)
		prn = isl_printer_print_str(prn, ")");
	else
		prn = isl_printer_print_str(prn, "]");
	isl_printer_free(prn);

	isl_basic_set_free(aff);
}
static struct cuda_stmt_access *print_expr(struct cuda_gen *gen, FILE *out,
	struct pet_expr *expr, struct cuda_stmt_access *access, int outer)
{
	int i;

	switch (expr->type) {
	case pet_expr_double:
		fprintf(out, "%g", expr->d);
		break;
	case pet_expr_access:
		print_access(gen, isl_map_copy(access->access), access->group);
		access = access->next;
		break;
	case pet_expr_unary:
		if (!outer)
			fprintf(out, "(");
		fprintf(out, " %s ", pet_op_str(expr->op));
		access = print_expr(gen, out, expr->args[pet_un_arg],
					access, 0);
		if (!outer)
			fprintf(out, ")");
		break;
	case pet_expr_binary:
		if (!outer)
			fprintf(out, "(");
		access = print_expr(gen, out, expr->args[pet_bin_lhs],
					access, 0);
		fprintf(out, " %s ", pet_op_str(expr->op));
		access = print_expr(gen, out, expr->args[pet_bin_rhs],
					access, 0);
		if (!outer)
			fprintf(out, ")");
		break;
	case pet_expr_ternary:
		if (!outer)
			fprintf(out, "(");
		access = print_expr(gen, out, expr->args[pet_ter_cond],
					access, 0);
		fprintf(out, " ? ");
		access = print_expr(gen, out, expr->args[pet_ter_true],
					access, 0);
		fprintf(out, " : ");
		access = print_expr(gen, out, expr->args[pet_ter_false],
					access, 0);
		if (!outer)
			fprintf(out, ")");
		break;
	case pet_expr_call:
		fprintf(out, "%s(", expr->name);
		for (i = 0; i < expr->n_arg; ++i) {
			if (i)
				fprintf(out, ", ");
			access = print_expr(gen, out, expr->args[i],
						access, 1);
		}
		fprintf(out, ")");
	}

	return access;
}

static void print_stmt_body(struct cuda_gen *gen,
	FILE *out, struct cuda_stmt *stmt)
{
	print_expr(gen, out, stmt->body, stmt->accesses, 1);
	fprintf(out, ";\n");
}
/* This function is called for each leaf in the innermost clast,
 * i.e., for each statement.
 * We print the statement body, simplifying the accesses based
 * on the schedule.
 */
static void print_statement(struct gpucode_info *code,
	struct clast_user_stmt *u)
{
	struct cuda_gen *gen = code->user;
	isl_space *dim;
	isl_set *par;
	isl_set *stmt_domain;
	isl_union_map *stmt_sched;
	isl_union_set *uset;
	int nr;
	struct cuda_stmt *stmt;

	nr = atoi(u->statement->name + 2);
	stmt = &gen->stmts[nr];

	stmt_domain = extract_host_domain(u);

	stmt_sched = isl_union_map_intersect_range(
		isl_union_map_copy(gen->local_sched),
		isl_union_set_from_set(extend(stmt_domain,
					gen->thread_tiled_len)));
	dim = isl_union_map_get_space(stmt_sched);
	par = parametrization(dim, gen->thread_tiled_len, 0,
				gen->thread_tiled_len, "c");
	stmt_sched = isl_union_map_intersect_range(stmt_sched,
				isl_union_set_from_set(par));

	uset = isl_union_map_domain(stmt_sched);
	dim = isl_union_set_get_space(uset);
	dim = isl_space_add_dims(dim, isl_dim_set,
				isl_set_dim(stmt->domain, isl_dim_set));
	dim = isl_space_set_tuple_name(dim, isl_dim_set, u->statement->name);
	gen->stmt_domain = isl_union_set_extract_set(uset, dim);
	isl_union_set_free(uset);

	print_indent(code->dst, code->indent);
	print_stmt_body(gen, code->dst, stmt);

	isl_set_free(gen->stmt_domain);
}
/* Print an access to the element in the global memory copy of the
 * given array that corresponds to element [qp[0]][qp[1]]...
 * of the original array.
 * The copy in global memory has been linearized, so we need to take
 * the array size into account.
 */
static void print_private_global_index(isl_ctx *ctx, FILE *out,
	struct cuda_array_info *array, __isl_keep isl_qpolynomial **qp)
{
	int i;
	isl_printer *prn;

	fprintf(out, "%s[", array->name);
	prn = isl_printer_to_file(ctx, out);
	prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
	for (i = 0; i + 1 < array->n_index; ++i)
		prn = isl_printer_print_str(prn, "(");
	for (i = 0; i < array->n_index; ++i) {
		if (i) {
			prn = isl_printer_print_str(prn, ") * (");
			prn = isl_printer_print_pw_aff(prn,
							array->local_bound[i]);
			prn = isl_printer_print_str(prn, ") + ");
		}
		prn = isl_printer_print_qpolynomial(prn, qp[i]);
	}
	isl_printer_free(prn);
	fprintf(out, "]");
}

/* Print an access to the element in the shared memory copy of the
 * given array reference group that corresponds to element [qps[0]][qps[1]]...
 * of the original array.
 * Since the array in shared memory is just a shifted copy of part
 * of the original array, we simply need to subtract the lower bound,
 * which was computed in can_tile_for_shared_memory.
 * If any of the indices is strided, then we first add
 * shared_bound[i].shift and divide by shared_bound[i].stride.
 */
static void print_private_local_index(isl_ctx *ctx, FILE *out,
	struct cuda_array_ref_group *group,
	__isl_keep isl_qpolynomial **qps, __isl_keep isl_set *domain)
{
	int i;
	isl_printer *prn;
	struct cuda_array_info *array = group->array;
	struct cuda_array_bound *bounds = group->private_bound;

	print_array_name(out, group);
	for (i = 0; i < array->n_index; ++i) {
		isl_qpolynomial *qp = isl_qpolynomial_copy(qps[i]);

		qp = shift_index(qp, array, &bounds[i], isl_set_copy(domain));

		fprintf(out, "[");
		prn = isl_printer_to_file(ctx, out);
		prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
		prn = isl_printer_print_qpolynomial(prn, qp);
		isl_printer_free(prn);
		fprintf(out, "]");
		isl_qpolynomial_free(qp);
	}
}
/* This function is called for each leaf in the clast of the code
 * for copying to or from private memory.
 * The statement name is read_private_<array> or write_private_<array>.
 *
 * The schedule iterates over the array elements, so we can use
 * the domain of private_sched at the current scheduling position
 * as the index of the array.
 */
static void print_private_copy_statement(struct gpucode_info *code,
	struct clast_user_stmt *u)
{
	struct cuda_gen *gen = code->user;
	isl_set *domain;
	isl_map *sched;
	struct cuda_array_ref_group *group = gen->private_group;
	int i;
	unsigned n_in;
	unsigned n_out;
	isl_space *dim;
	isl_set *param;
	isl_set *index;
	isl_basic_set *aff;
	isl_ctx *ctx;
	isl_qpolynomial **qp;
	int read;

	read = !strncmp(u->statement->name, "read", 4);

	domain = extract_host_domain(u);
	assert(domain);

	sched = isl_map_copy(gen->private_sched);
	sched = isl_map_reverse(sched);
	sched = isl_map_intersect_domain(sched, domain);
	n_in = isl_map_dim(sched, isl_dim_in);
	n_out = isl_map_dim(sched, isl_dim_out);
	dim = isl_map_get_space(sched);
	dim = isl_space_drop_dims(dim, isl_dim_in, 0, n_in);
	dim = isl_space_drop_dims(dim, isl_dim_out, 0, n_out);
	param = parametrization(dim, n_in, 0, n_in, "c");
	sched = isl_map_align_params(sched, isl_set_get_space(param));
	sched = isl_map_intersect_domain(sched, param);
	index = isl_map_range(sched);
	domain = isl_set_copy(index);
	aff = isl_set_affine_hull(index);
	domain = isl_set_project_out(domain, isl_dim_set, 0, n_out);

	ctx = isl_basic_set_get_ctx(aff);
	qp = isl_alloc_array(ctx, isl_qpolynomial *, n_out);
	assert(qp);

	for (i = 0; i < n_out; ++i) {
		isl_constraint *c;
		int ok;

		ok = isl_basic_set_has_defining_equality(aff,
							isl_dim_set, i, &c);
		assert(ok);
		qp[i] = isl_qpolynomial_from_constraint(c, isl_dim_set, i);
		qp[i] = isl_qpolynomial_project_domain_on_params(qp[i]);
	}

	print_indent(code->dst, code->indent);
	if (read) {
		print_private_local_index(ctx, code->dst, group, qp, domain);
		fprintf(code->dst, " = ");
		print_private_global_index(ctx, code->dst, group->array, qp);
	} else {
		print_private_global_index(ctx, code->dst, group->array, qp);
		fprintf(code->dst, " = ");
		print_private_local_index(ctx, code->dst, group, qp, domain);
	}
	fprintf(code->dst, ";\n");

	for (i = 0; i < n_out; ++i)
		isl_qpolynomial_free(qp[i]);
	free(qp);

	isl_basic_set_free(aff);
	isl_set_free(domain);
}
static void print_private_access(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_take isl_set *access,
	const char *type, struct cuda_array_ref_group *group)
{
	const char *array_name;
	char *name;
	isl_ctx *ctx;
	unsigned nvar = isl_set_dim(access, isl_dim_set);
	isl_union_map *usched;

	if (isl_set_fast_is_empty(access)) {
		isl_set_free(access);
		return;
	}

	ctx = isl_set_get_ctx(access);
	array_name = isl_set_get_tuple_name(access);
	name = isl_alloc_array(ctx, char,
		strlen(type) + sizeof("_private_") + strlen(array_name) + 20);
	if (group->array->n_group > 1)
		sprintf(name, "%s_private_%s_%d", type, array_name, group->nr);
	else
		sprintf(name, "%s_private_%s", type, array_name);
	access = isl_set_set_tuple_name(access, name);
	free(name);

	gen->private_sched = shift_access(access, group);
	gen->private_group = group;

	usched = isl_union_map_from_map(isl_map_copy(gen->private_sched));
	print_shared_body(gen, shared_domain, usched, nvar,
				&print_private_copy_statement, 1);
	isl_union_map_free(usched);

	isl_map_free(gen->private_sched);
}

/* Print code for reading into or writing from private memory
 * the given array reference group.
 *
 * sched maps the original iteration domains to the shared memory tile loops.
 */
static void print_group_private_accesses(struct cuda_gen *gen,
	struct cuda_array_ref_group *group,
	const char *type, __isl_keep isl_set *shared_domain,
	unsigned first_shared, int shared_len, __isl_keep isl_union_map *sched)
{
	int read;
	isl_union_map *access;
	isl_union_set *uset;
	isl_set *access_set;

	if (!group->private_bound)
		return;

	read = !strcmp(type, "read");

	access = group_access_relation(group, read, !read);
	access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
	access = isl_union_map_intersect(access,
		isl_union_map_copy(gen->private_access));
	uset = isl_union_map_range(access);

	if (isl_union_set_is_empty(uset)) {
		isl_union_set_free(uset);
		return;
	}

	access_set = isl_set_from_union_set(uset);
	access_set = isl_set_coalesce(access_set);
	access_set = isl_set_eliminate(access_set, isl_dim_param,
			first_shared + shared_len,
			gen->shared_len - shared_len);

	print_private_access(gen, shared_domain, access_set, type, group);
}
/* Print code for reading into or writing from private memory at
 * the given level (-1 for innermost).
 *
 * If we are not printing at the innermost level, then the dimensionality
 * of shared_domain may be smaller than gen->shared_len.
 * As the rest of the code assumes that the domain of access has
 * gen->shared_len dimensions, we therefore may need to embed this domain
 * in a higher dimensional space after intersection with shared_domain.
 *
 * This code is very similar to print_shared_accesses.
 * The main difference is that we also need to take gen->private_access
 * into account.
 */
static void print_private_accesses(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
	const char *type, int level)
{
	int i, j;
	isl_space *dim;
	isl_map *proj;
	int shared_len = isl_set_dim(shared_domain, isl_dim_set);
	unsigned first_shared;
	isl_union_map *sched;

	shared_domain = isl_set_copy(shared_domain);
	sched = isl_union_map_copy(gen->tiled_sched);
	dim = isl_union_map_get_space(sched);
	first_shared = isl_space_dim(dim, isl_dim_param);
	proj = projection(dim, gen->tiled_len, shared_len);
	sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
	sched = isl_union_map_intersect_range(sched,
			isl_union_set_from_set(isl_set_copy(shared_domain)));
	if (shared_len != gen->shared_len) {
		dim = isl_union_map_get_space(sched);
		proj = projection(dim, gen->shared_len, shared_len);
		proj = isl_map_reverse(proj);
		shared_domain = isl_set_apply(shared_domain,
						isl_map_copy(proj));
		sched = isl_union_map_apply_range(sched,
				isl_union_map_from_map(proj));
	}

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		if (gen->array[i].print_shared_level != level)
			continue;

		for (j = 0; j < array->n_group; ++j)
			print_group_private_accesses(gen, array->groups[j],
						type, shared_domain,
						first_shared, shared_len,
						sched);
	}

	isl_union_map_free(sched);
	isl_set_free(shared_domain);
}
/* Set unroll[j] if the input dimension j is involved in
 * the index expression represented by bmap.
 */
static int check_unroll(__isl_take isl_basic_map *bmap, void *user)
{
	int i, j;
	int n_in = isl_basic_map_dim(bmap, isl_dim_in);
	int n_out = isl_basic_map_dim(bmap, isl_dim_out);
	int *unroll = user;

	for (i = 0; i < n_out; ++i) {
		isl_constraint *c;
		int ok;

		ok = isl_basic_map_has_defining_equality(bmap,
							isl_dim_out, i, &c);
		assert(ok);
		for (j = 0; j < n_in; ++j)
			if (isl_constraint_involves_dims(c, isl_dim_in, j, 1))
				unroll[j] = 1;
		isl_constraint_free(c);
	}

	isl_basic_map_free(bmap);
	return 0;
}
/* Given an array pos mapping input dimensions to the corresponding
 * output dimension, construct the corresponding map.
 */
static __isl_give isl_map *permutation(__isl_take isl_space *dim,
	int *pos, int len)
{
	int i;
	isl_constraint *c;
	isl_basic_map *bmap;
	isl_local_space *ls;

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i], 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}
	isl_local_space_free(ls);

	return isl_map_from_basic_map(bmap);
}
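
/* Illustration (not part of the original code): with len = 3 and
 * pos = { 1, 2, 0 }, the equalities out[pos[i]] = in[i] constructed
 * above produce the map
 *
 *	{ [i0, i1, i2] -> [i2, i0, i1] }
 *
 * i.e., input dimension i is moved to output position pos[i].
 */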
/* Find all loops involved in any of the index expressions for any of
 * the private accesses, move them innermost and then mark them as
 * requiring unrolling by setting gen->first_unroll.
 * The loops involved should all be parallel because of the checks
 * we performed in check_private_group_access.  Moving them innermost
 * is therefore a valid transformation.
 */
static __isl_give isl_union_map *interchange_for_unroll(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i, j;
	int unroll[gen->thread_tiled_len];
	int perm[gen->thread_tiled_len];
	isl_space *dim;
	isl_map *permute;
	int len = gen->shared_len + gen->n_parallel + gen->n_block;

	gen->first_unroll = -1;

	for (i = 0; i < gen->thread_tiled_len; ++i)
		unroll[i] = 0;
	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		for (j = 0; j < array->n_group; ++j) {
			isl_union_map *access;
			isl_map *acc;

			if (!array->groups[j]->private_bound)
				continue;

			access = group_access_relation(array->groups[j], 1, 1);
			access = isl_union_map_apply_domain(access,
						isl_union_map_copy(sched));

			acc = isl_map_from_union_map(access);
			isl_map_foreach_basic_map(acc, &check_unroll, unroll);

			isl_map_free(acc);
		}
	}

	/* Give up if any of the shared tile loops or any of the loops
	 * beyond "len" would need to be unrolled.
	 */
	for (i = 0; i < gen->shared_len; ++i)
		if (unroll[i])
			return sched;

	for (i = gen->shared_len; i < len; ++i)
		if (unroll[i])
			break;

	if (i >= len)
		return sched;

	for (i = len; i < gen->thread_tiled_len; ++i)
		if (unroll[i])
			return sched;

	j = 0;
	for (i = 0; i < gen->thread_tiled_len; ++i)
		if (!unroll[i])
			perm[i] = j++;
	gen->first_unroll = 1 + j;
	for (i = 0; i < len; ++i)
		if (unroll[i])
			perm[i] = j++;

	dim = isl_union_map_get_space(sched);
	permute = permutation(dim, perm, gen->thread_tiled_len);
	sched = isl_union_map_apply_range(sched,
					isl_union_map_from_map(permute));

	return sched;
}
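
/* Illustration (hypothetical values, not from the original code):
 * with thread_tiled_len = 4, len = 3 and unroll = { 0, 0, 1, 0 },
 * the dimensions that need no unrolling get positions 0, 1 and 2,
 * so perm = { 0, 1, 3, 2 }: dimension 2 is moved innermost and
 * gen->first_unroll is set to 4 (1 + the number of dimensions that
 * are not unrolled).
 */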
/* This function is called for each leaf in the clast of the kernel code.
 * We first specialize the schedule to the site of the leaf and
 * print code for reading into shared memory, performing the actual
 * computations and writing from shared memory, with the required
 * synchronizations.
 */
static void print_kernel_user(struct gpucode_info *code,
	struct clast_user_stmt *u)
{
	struct cuda_gen *gen = code->user;
	isl_set *shared_domain;

	shared_domain = extract_entire_host_domain(u);

	print_shared_accesses(gen, shared_domain, gen->read, "read", -1);

	print_private_accesses(gen, shared_domain, gen->read, "read", -1);

	print_shared_body(gen, shared_domain, gen->local_sched,
			    gen->thread_tiled_len, &print_statement,
			    gen->first_unroll);

	print_private_accesses(gen, shared_domain, gen->write, "write", -1);

	print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
	fprintf(gen->cuda.kernel_c, "__syncthreads();\n");

	print_shared_accesses(gen, shared_domain, gen->write, "write", -1);

	isl_set_free(shared_domain);
}
/* Check if we need to perform any copying to shared memory at this level
 * and if so, print the copying instructions.
 * Any array for which we are allowed to print copying instructions at
 * this level, but haven't done so already, is printed.
 */
static void print_kernel_for_head(struct gpucode_info *code,
	struct clast_for *f)
{
	int i;
	struct cuda_gen *gen = code->user;
	isl_set *domain;
	int level;
	int print = 0;

	domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
	level = isl_set_dim(domain, isl_dim_set) - 1;

	for (i = 0; i < gen->n_array; ++i) {
		if (gen->array[i].print_shared_level >= 0)
			continue;
		if (gen->array[i].last_shared > level)
			continue;
		gen->array[i].print_shared_level = level;
		print = 1;
	}

	if (print) {
		print_shared_accesses(gen, domain, gen->read, "read", level);
		print_private_accesses(gen, domain, gen->read, "read", level);
	}

	isl_set_free(domain);
}
/* Print instructions for copying from shared memory for each array
 * for which print_kernel_for_head has added copying instructions
 * at this level.
 */
static void print_kernel_for_foot(struct gpucode_info *code,
	struct clast_for *f)
{
	int i;
	struct cuda_gen *gen = code->user;
	isl_set *domain;
	int level;
	int print = 0;

	domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
	level = isl_set_dim(domain, isl_dim_set) - 1;

	for (i = 0; i < gen->n_array; ++i) {
		if (gen->array[i].print_shared_level != level)
			continue;
		print = 1;
		break;
	}

	if (print) {
		print_private_accesses(gen, domain, gen->write, "write", level);
		print_shared_accesses(gen, domain, gen->write, "write", level);
	}

	isl_set_free(domain);
}
/* Use CLooG to generate code for the outer gen->shared_len loops
 * of the local schedule "sched".
 * The pretty printing of this code is handled by gpu_print_host_stmt,
 * which calls print_kernel_user for each iteration of the shared tile loops.
 */
static void print_cloog_kernel_body(struct cuda_gen *gen,
	__isl_keep isl_set *context, __isl_keep isl_union_map *sched)
{
	int i;
	CloogOptions *options;
	CloogDomain *cloog_context;
	CloogUnionDomain *ud;
	CloogInput *input;
	struct clast_stmt *stmt;
	char name[20];

	sched = isl_union_map_copy(sched);
	sched = isl_union_map_align_params(sched, isl_set_get_space(context));

	options = cloog_options_malloc(gen->state);
	options->language = CLOOG_LANGUAGE_C;
	options->strides = 1;
	options->stop = gen->shared_len;
	options->f = gen->tiled_len;
	options->l = gen->tiled_len;
	options->save_domains = 1;
	options->noscalars = 1;

	ud = cloog_union_domain_from_isl_union_map(sched);
	for (i = 0; i < gen->shared_len; ++i) {
		snprintf(name, sizeof(name), "g%d", i);
		ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
	}
	cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
	input = cloog_input_alloc(cloog_context, ud);

	stmt = cloog_clast_create_from_input(input, options);

	gen->kernel_code.indent = 4;
	gen->kernel_code.dst = gen->cuda.kernel_c;
	gen->kernel_code.print_user_stmt = NULL;
	gen->kernel_code.print_user_stmt_list = &print_kernel_user;
	gen->kernel_code.print_for_head = &print_kernel_for_head;
	gen->kernel_code.print_for_foot = &print_kernel_for_foot;
	gen->kernel_code.user = gen;
	gpu_print_host_stmt(&gen->kernel_code, stmt);

	cloog_clast_free(stmt);
	cloog_options_free(options);
}
static void print_kernel_iterators(struct cuda_gen *gen)
{
	int i;
	const char *block_dims[] = { "blockIdx.x", "blockIdx.y" };
	const char *thread_dims[] = { "threadIdx.x", "threadIdx.y",
					"threadIdx.z" };

	if (gen->n_grid > 0) {
		print_indent(gen->cuda.kernel_c, 4);
		fprintf(gen->cuda.kernel_c, "int ");
		for (i = 0; i < gen->n_grid; ++i) {
			if (i)
				fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_c, "b%d = %s",
				i, block_dims[gen->n_grid - 1 - i]);
		}
		fprintf(gen->cuda.kernel_c, ";\n");
	}

	if (gen->n_block > 0) {
		print_indent(gen->cuda.kernel_c, 4);
		fprintf(gen->cuda.kernel_c, "int ");
		for (i = 0; i < gen->n_block; ++i) {
			if (i)
				fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_c, "t%d = %s",
				i, thread_dims[gen->n_block - 1 - i]);
		}
		fprintf(gen->cuda.kernel_c, ";\n");
	}
}
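
/* Illustration (assuming n_grid = 2 and n_block = 3): the loops above
 * emit the innermost CUDA index last, so the kernel would start with
 *
 *	int b0 = blockIdx.y, b1 = blockIdx.x;
 *	int t0 = threadIdx.z, t1 = threadIdx.y, t2 = threadIdx.x;
 */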
static void print_group_shared_array(struct cuda_gen *gen,
	struct cuda_array_ref_group *group)
{
	int j;
	struct cuda_array_bound *bounds;

	bounds = group->private_bound;
	if (!bounds)
		bounds = group->shared_bound;
	if (!bounds)
		return;

	print_indent(gen->cuda.kernel_c, 4);
	fprintf(gen->cuda.kernel_c, "%s%s ",
		group->private_bound ? "" : "__shared__ ", group->array->type);
	print_array_name(gen->cuda.kernel_c, group);
	for (j = 0; j < group->array->n_index; ++j) {
		fprintf(gen->cuda.kernel_c, "[");
		isl_int_print(gen->cuda.kernel_c, bounds[j].size, 0);
		fprintf(gen->cuda.kernel_c, "]");
	}
	fprintf(gen->cuda.kernel_c, ";\n");
}
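
/* Illustration (hypothetical array "A" of type float with a 32x32 tile):
 * depending on whether the group is mapped to registers or to shared
 * memory, the declaration printed above would look like
 *
 *	float private_A[32][32];
 * or
 *	__shared__ float shared_A[32][32];
 */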
static void print_shared_arrays(struct cuda_gen *gen)
{
	int i, j;

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		for (j = 0; j < array->n_group; ++j)
			print_group_shared_array(gen, array->groups[j]);
	}
}
static void print_kernel_body(struct cuda_gen *gen,
	__isl_keep isl_set *host_domain, __isl_keep isl_union_map *sched)
{
	isl_set *context;

	context = isl_set_copy(host_domain);
	context = parametrize(context, 0, gen->tile_first, "h");
	context = isl_set_project_out(context, isl_dim_set, 0, gen->tile_first);
	context = add_bounded_parameters(context,
					gen->n_grid, gen->grid_dim, "b");

	print_kernel_iterators(gen);
	print_shared_arrays(gen);

	fprintf(gen->cuda.kernel_c, "\n");

	print_cloog_kernel_body(gen, context, sched);

	isl_set_free(context);
}
/* Given a constraint
 *
 *		a(p,i) + j = g f(e)
 *
 * or -a(p,i) - j = g f(e) if sign < 0,
 * store a(p,i) in bound->shift and g (stride) in bound->stride.
 * a(p,i) is assumed to be an expression in only the parameters.
 */
static void extract_stride(__isl_keep isl_constraint *c,
	struct cuda_array_bound *bound, isl_int stride, int sign)
{
	int i;
	isl_int v;
	isl_int one;
	isl_space *dim;
	unsigned nparam;
	isl_qpolynomial *qp;

	isl_int_set(bound->stride, stride);

	dim = isl_constraint_get_space(c);
	dim = isl_space_params(dim);

	nparam = isl_space_dim(dim, isl_dim_param);

	isl_int_init(v);
	isl_int_init(one);
	isl_int_set_si(one, 1);

	isl_constraint_get_constant(c, &v);
	if (sign < 0)
		isl_int_neg(v, v);
	qp = isl_qpolynomial_rat_cst_on_domain(isl_space_copy(dim), v, one);

	for (i = 0; i < nparam; ++i) {
		isl_qpolynomial *t, *p;

		isl_constraint_get_coefficient(c, isl_dim_param, i, &v);
		if (isl_int_is_zero(v))
			continue;
		if (sign < 0)
			isl_int_neg(v, v);
		t = isl_qpolynomial_rat_cst_on_domain(isl_space_copy(dim), v, one);
		p = isl_qpolynomial_var_on_domain(isl_space_copy(dim), isl_dim_param, i);
		t = isl_qpolynomial_mul(t, p);
		qp = isl_qpolynomial_add(qp, t);
	}

	isl_space_free(dim);
	isl_int_clear(one);
	isl_int_clear(v);

	bound->shift = qp;
}
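
/* Illustration (hypothetical constraint, not from the original code):
 * for the equality -N + j = 4 e0, with N a parameter and e0
 * existentially quantified, the code above stores stride 4 and
 * shift -N, i.e., j - N is always a multiple of 4.
 */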
/* Given an equality constraint of a map with a single output dimension j,
 * check if the constraint is of the form
 *
 *		a(p,i) + j = g f(e)
 *
 * with a(p,i) an expression in the parameters and input dimensions
 * and f(e) an expression in the existentially quantified variables.
 * If so, and if g is larger than any such g from a previously considered
 * constraint, then call extract_stride to record the stride information
 * in bound.
 */
static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
{
	int i;
	isl_int v, stride;
	unsigned n_div;
	struct cuda_array_bound *bound = user;

	isl_int_init(v);
	isl_int_init(stride);

	n_div = isl_constraint_dim(c, isl_dim_div);
	isl_constraint_get_coefficient(c, isl_dim_out, 0, &v);

	if (n_div && (isl_int_is_one(v) || isl_int_is_negone(v))) {
		int s = isl_int_sgn(v);
		isl_int_set_si(stride, 0);
		for (i = 0; i < n_div; ++i) {
			isl_constraint_get_coefficient(c, isl_dim_div, i, &v);
			isl_int_gcd(stride, stride, v);
		}
		if (!isl_int_is_zero(stride) &&
		    isl_int_gt(stride, bound->stride))
			extract_stride(c, bound, stride, s);
	}

	isl_int_clear(stride);
	isl_int_clear(v);

	isl_constraint_free(c);
	return 0;
}
/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *		a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 */
static __isl_give isl_basic_map *check_stride(struct cuda_gen *gen,
	struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
{
	isl_space *dim;
	isl_basic_map *aff;
	isl_basic_map *shift;
	isl_qpolynomial *qp, *t;
	isl_int one;

	isl_int_set_si(bound->stride, -1);

	aff = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));

	isl_basic_map_foreach_constraint(aff, &check_stride_constraint, bound);

	isl_basic_map_free(aff);

	if (isl_int_is_neg(bound->stride))
		return bounds;

	qp = isl_qpolynomial_copy(bound->shift);
	qp = isl_qpolynomial_add_dims(qp, isl_dim_in, 1);
	dim = isl_qpolynomial_get_domain_space(qp);
	t = isl_qpolynomial_var_on_domain(isl_space_copy(dim), isl_dim_set, 0);
	qp = isl_qpolynomial_add(qp, t);
	isl_int_init(one);
	isl_int_set_si(one, 1);
	t = isl_qpolynomial_rat_cst_on_domain(dim, one, bound->stride);
	isl_int_clear(one);
	qp = isl_qpolynomial_mul(qp, t);
	shift = isl_basic_map_from_qpolynomial(qp);

	bound->shift_map = isl_basic_map_copy(shift);
	bounds = isl_basic_map_apply_range(bounds, shift);

	return bounds;
}
struct cuda_size_info {
	isl_basic_set *bset;
	struct cuda_array_bound *bound;
	int pos;
};
/* Given a constraint from the basic set describing the bounds on
 * an array index, check if it is a lower bound, say m i >= b(x), and,
 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
 * upper bound.  If so, and if this bound is smaller than any bound
 * derived from earlier constraints, set the size to this bound on
 * the expression and the lower bound to ceil(b(x)/m).
 */
static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
{
	struct cuda_size_info *size = user;
	unsigned nparam;
	unsigned n_div;
	isl_int v;

	nparam = isl_basic_set_dim(size->bset, isl_dim_param);
	n_div = isl_constraint_dim(c, isl_dim_div);

	if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div)) {
		isl_constraint_free(c);
		return 0;
	}

	isl_int_init(v);

	isl_constraint_get_coefficient(c, isl_dim_set, size->pos, &v);

	if (isl_int_is_pos(v)) {
		isl_aff *aff;
		isl_aff *lb;
		enum isl_lp_result res;

		aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
		aff = isl_aff_ceil(aff);

		lb = isl_aff_copy(aff);

		aff = isl_aff_neg(aff);
		aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);

		res = isl_basic_set_max(size->bset, aff, &v);
		isl_aff_free(aff);

		if (res == isl_lp_ok) {
			isl_int_add_ui(v, v, 1);
			if (isl_int_is_neg(size->bound->size) ||
			    isl_int_lt(v, size->bound->size)) {
				isl_int_set(size->bound->size, v);
				lb = isl_aff_drop_dims(lb, isl_dim_in,
							size->pos, 1);
				isl_aff_free(size->bound->lb);
				size->bound->lb = isl_aff_copy(lb);
			}
		}
		isl_aff_free(lb);
	}

	isl_int_clear(v);

	isl_constraint_free(c);
	return 0;
}
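
/* Illustration (hypothetical bounds, not from the original code):
 * if the accessed elements in direction pos satisfy t <= i <= t + 31
 * for some expression t, then the lower bound constraint i - t >= 0
 * gives lb = t, the maximum of i - t + 1 is 32, and the size of the
 * tile in this direction is therefore set to 32.
 */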
/* Given a basic map "bounds" that maps parameters and input dimensions
 * to a single output dimension, look for an expression in the parameters
 * and input dimensions such that the range of the output dimension shifted
 * by this expression is a constant.
 *
 * In particular, we currently only consider lower bounds on the output
 * dimension as candidate expressions.
 */
static int compute_array_dim_size(struct cuda_gen *gen,
	struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
{
	struct cuda_size_info size;

	bounds = check_stride(gen, bound, bounds);

	isl_int_set_si(bound->size, -1);
	bound->lb = NULL;

	size.bound = bound;
	size.pos = isl_basic_map_dim(bounds, isl_dim_in);
	size.bset = isl_basic_map_wrap(bounds);
	size.bset = isl_basic_set_flatten(size.bset);
	isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
					&size);
	isl_basic_set_free(size.bset);

	return isl_int_is_nonneg(bound->size) ? 0 : -1;
}
/* Check if we can find a shared memory tile for the given array
 * based on the given accesses, and if so, put the results
 * in array->shared_bound.
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant.
 */
static int can_tile_for_shared_memory(struct cuda_gen *gen,
	struct cuda_array_info *array, __isl_keep isl_map *access,
	struct cuda_array_bound *bounds)
{
	int i;

	for (i = 0; i < array->n_index; ++i) {
		isl_map *access_i;
		isl_basic_map *hull;

		access_i = isl_map_copy(access);
		access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
		access_i = isl_map_project_out(access_i, isl_dim_out,
					    1, array->n_index - (i + 1));
		access_i = isl_map_compute_divs(access_i);
		hull = isl_map_simple_hull(access_i);
		if (compute_array_dim_size(gen, &bounds[i], hull) < 0)
			return 0;
	}

	return 1;
}
/* Construct a map with input the shared tile loops and the loops that
 * will be wrapped around the threads, that relates the latter loops
 * to the thread indices and then projects them out.
 */
static __isl_give isl_map *compute_privatization(struct cuda_gen *gen)
{
	isl_space *dim;
	isl_map *priv;
	isl_map *tiling;
	isl_map *proj;
	isl_set *par;

	dim = isl_union_map_get_space(gen->shared_sched);

	if (gen->options->wrap)
		tiling = wrap(isl_space_copy(dim), gen->shared_len + gen->n_block,
				gen->shared_len, gen->n_block, gen->block_dim);
	else
		tiling = tile(isl_space_copy(dim), gen->shared_len + gen->n_block,
				gen->shared_len, gen->n_block, gen->block_dim);

	priv = tiling;

	par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
		gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
		gen->n_block, "t");

	priv = isl_map_align_params(priv, isl_set_get_space(par));
	priv = isl_map_intersect_range(priv, par);

	dim = isl_map_get_space(priv);
	dim = isl_space_drop_dims(dim, isl_dim_in, 0, isl_space_dim(dim, isl_dim_in));
	dim = isl_space_drop_dims(dim, isl_dim_out, 0, isl_space_dim(dim, isl_dim_out));
	proj = projection(dim, gen->shared_len + 2 * gen->n_block,
				gen->shared_len);

	priv = isl_map_apply_range(priv, proj);

	return priv;
}
/* Construct a map from domain_dim to domain_dim that increments
 * the dimension at position "pos" and leaves all other dimensions
 * constant.
 */
static __isl_give isl_map *next(__isl_take isl_space *domain_dim, int pos)
{
	int i;
	int len = isl_space_dim(domain_dim, isl_dim_set);
	isl_space *dim;
	isl_basic_map *next;
	isl_local_space *ls;

	dim = isl_space_map_from_set(domain_dim);
	next = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		isl_constraint *c;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		if (i == pos)
			isl_constraint_set_constant_si(c, 1);
		next = isl_basic_map_add_constraint(next, c);
	}

	isl_local_space_free(ls);

	return isl_map_from_basic_map(next);
}
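
/* Illustration (not part of the original code): for a two-dimensional
 * space and pos = 1, the constraints above yield the map
 *
 *	{ [i0, i1] -> [i0, 1 + i1] }
 */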
/* Check if the given access is coalesced.
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * This function is only called for access relations without reuse.
 */
static int access_is_coalesced(struct cuda_gen *gen,
	__isl_keep isl_union_map *access)
{
	isl_space *dim;
	isl_map *access_map;
	isl_map *next_thread_x;
	isl_map *next_element;
	isl_map *map;
	int coalesced;

	access = isl_union_map_copy(access);
	access = isl_union_map_apply_domain(access,
				isl_union_map_copy(gen->tiled_sched));
	access_map = isl_map_from_union_map(access);

	dim = isl_map_get_space(access_map);
	dim = isl_space_domain(dim);
	next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);

	dim = isl_map_get_space(access_map);
	dim = isl_space_range(dim);
	next_element = next(dim, isl_space_dim(dim, isl_dim_set) - 1);

	map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
	map = isl_map_apply_range(map, access_map);

	coalesced = isl_map_is_subset(map, next_element);

	isl_map_free(next_element);
	isl_map_free(map);

	return coalesced;
}
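
/* Illustration (hypothetical accesses, not from the original code):
 * for an access A[i][j] in which j is wrapped over threadIdx.x,
 * stepping the last thread index steps the last array index by one,
 * so the subset test above succeeds and the access is coalesced;
 * for A[j][i] the same step jumps by a full row and the test fails.
 */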
/* For the given array reference group, check whether the access is private
 * to the thread.  That is, check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation that maps the shared tile loop iterators
 * and the shared point loop iterators that will be wrapped over the
 * threads to the array elements.
 * We actually check that those iterators that will be wrapped
 * partition the array space.  This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * We also check that the index expression only depends on parallel
 * loops.  That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If it turns out we can use registers, we compute the private memory
 * tile size using can_tile_for_shared_memory, after introducing a dependence
 * on the thread indices.
 *
 * Before performing any of the above computations, we first check
 * if there is any reuse on the reference group.  If not, we simply
 * return.  If, moreover, the access is coalesced then we also remove
 * the shared memory tiling since we should just use global memory instead.
 */
static void check_private_group_access(struct cuda_gen *gen,
	struct cuda_array_ref_group *group)
{
	isl_map *acc;
	isl_union_map *access;
	int n_index = group->array->n_index;

	access = group_access_relation(group, 1, 1);
	if (isl_union_map_is_injective(access)) {
		if (group->shared_bound && access_is_coalesced(gen, access)) {
			free_bound_list(group->shared_bound, n_index);
			group->shared_bound = NULL;
		}
		isl_union_map_free(access);
		return;
	}
	access = isl_union_map_apply_domain(access,
					isl_union_map_copy(gen->shared_sched));

	acc = isl_map_from_union_map(access);

	if (!isl_map_is_bijective(acc)) {
		isl_map_free(acc);
		return;
	}

	group->private_bound = create_bound_list(gen->ctx, n_index);
	acc = isl_map_align_params(acc, isl_map_get_space(gen->privatization));
	acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
	if (!can_tile_for_shared_memory(gen, group->array, acc,
					group->private_bound)) {
		free_bound_list(group->private_bound, n_index);
		group->private_bound = NULL;
	}

	isl_map_free(acc);
}
/* Look for the last shared tile loop that affects the offset of the
 * shared or private tile and store the result in array->last_shared.
 */
static void set_last_shared(struct cuda_gen *gen,
	struct cuda_array_ref_group *group)
{
	int i, j;
	struct cuda_array_bound *bounds;
	unsigned first_shared = gen->first_shared;
	int n_index = group->array->n_index;

	bounds = group->private_bound;
	if (!bounds)
		bounds = group->shared_bound;
	if (!bounds)
		return;

	for (j = gen->shared_len - 1; j >= 0; --j) {
		for (i = 0; i < n_index; ++i) {
			isl_aff *lb;
			isl_qpolynomial *shift;

			lb = bounds[i].lb;
			if (isl_aff_involves_dims(lb, isl_dim_param,
							first_shared + j, 1))
				break;

			shift = bounds[i].shift;
			if (!shift)
				continue;
			if (isl_qpolynomial_involves_dims(shift, isl_dim_param,
							first_shared + j, 1))
				break;
		}
		if (i < n_index)
			break;
	}

	group->array->last_shared = j;
}
/* Compute the sizes of all private arrays for the current kernel,
 * as well as the offsets of the private pieces in the original arrays.
 * If we cannot or don't want to privatize a given array group,
 * we use the shared memory tile sizes computed in
 * compute_group_shared_bound instead.
 *
 * If a given array only has a single reference group and if we have
 * been able to find a private or shared tile,
 * we also look for the last shared tile loop that affects the offset
 * (and therefore the array tile) and store the result in array->last_shared.
 *
 * A privatized copy of all access relations from reference groups that
 * are mapped to private memory is stored in gen->private_access.
 */
static void compute_private_size(struct cuda_gen *gen)
{
	int i, j;
	isl_union_map *private;

	if (!gen->options->use_private_memory)
		return;

	private = isl_union_map_empty(isl_union_map_get_space(gen->shared_sched));

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		for (j = 0; j < array->n_group; ++j) {
			check_private_group_access(gen, array->groups[j]);

			if (!array->groups[j]->private_bound)
				continue;

			private = isl_union_map_union(private,
				group_access_relation(array->groups[j], 1, 1));
		}

		array->last_shared = gen->shared_len - 1;
		array->print_shared_level = -1;

		if (array->n_group != 1)
			continue;
		set_last_shared(gen, array->groups[0]);
	}

	if (isl_union_map_is_empty(private))
		isl_union_map_free(private);
	else {
		isl_union_map *priv;

		private = isl_union_map_apply_domain(private,
					isl_union_map_copy(gen->shared_sched));
		priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
		private = isl_union_map_apply_domain(private, priv);
		gen->private_access = private;
	}
}
/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel.
 */
static int populate_array_references(struct cuda_gen *gen,
	struct cuda_array_info *array, __isl_keep isl_union_map *sched,
	struct cuda_array_ref_group **groups)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(sched);

	n = 0;
	for (i = 0; i < array->n_ref; ++i) {
		isl_union_map *umap;
		isl_map *map;
		struct cuda_array_ref_group *group;
		struct cuda_stmt_access *access = array->refs[i];

		map = isl_map_copy(access->access);
		umap = isl_union_map_from_map(map);
		umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(sched));

		if (isl_union_map_is_empty(umap)) {
			isl_union_map_free(umap);
			continue;
		}

		map = isl_map_from_union_map(umap);

		group = isl_calloc_type(ctx, struct cuda_array_ref_group);
		assert(group);
		group->array = array;
		group->access = map;
		group->write = access->write;
		group->refs = &array->refs[i];

		groups[n++] = group;
	}

	return n;
}
static void free_array_ref_group(struct cuda_array_ref_group *group,
	int n_index)
{
	if (!group)
		return;
	free_bound_list(group->shared_bound, n_index);
	free_bound_list(group->private_bound, n_index);
	isl_map_free(group->access);
	free(group->refs);
	free(group);
}
/* If two groups have overlapping access relations and if one of them
 * involves a write, then merge the two groups into one.
 *
 * We keep track of the grouping in "leader".  leader[j] points to
 * an earlier group array element that belongs to the same group,
 * or the array element j itself if this element is the first in the group.
 *
 * Return the number of group leaders.
 */
static int group_overlapping_writes(int n,
	struct cuda_array_ref_group **groups, int *leader)
{
	int i, j;
	int n_group = n;

	for (i = 0; i < n; ++i) {
		int l = i;
		groups[l]->n_ref = 1;
		for (j = i - 1; j >= 0; --j) {
			isl_map *map;
			int empty;

			if (leader[j] != j)
				continue;
			if (!groups[l]->write && !groups[j]->write)
				continue;

			map = isl_map_intersect(isl_map_copy(groups[l]->access),
					    isl_map_copy(groups[j]->access));
			empty = isl_map_is_empty(map);
			isl_map_free(map);

			if (empty)
				continue;

			groups[j]->access = isl_map_union(groups[j]->access,
							groups[l]->access);
			groups[j]->write = 1;
			groups[l]->access = NULL;
			groups[j]->n_ref += groups[l]->n_ref;
			l = leader[l] = j;
			n_group--;
		}
		leader[i] = l;
	}

	return n_group;
}
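
/* Illustration (hypothetical references, not from the original code):
 * for three references R0, W1, R2 to the same array where only W1
 * overlaps R0, the pass above merges W1 into R0, leaving
 * leader = { 0, 0, 2 } and two group leaders.
 */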
/* Compute the size of the shared array corresponding to the given
 * array reference group, based on the accesses from the current kernel,
 * as well as the offset of the shared piece in the original array.
 */
static void compute_group_shared_bound(struct cuda_gen *gen,
	struct cuda_array_info *array, struct cuda_array_ref_group *group)
{
	isl_ctx *ctx = isl_space_get_ctx(array->dim);

	if (!gen->options->use_shared_memory)
		return;

	group->shared_bound = create_bound_list(ctx, array->n_index);
	if (!can_tile_for_shared_memory(gen, array, group->access,
					group->shared_bound)) {
		free_bound_list(group->shared_bound, array->n_index);
		group->shared_bound = NULL;
	}
}
/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile and if the merged group also has
 * a shared memory tile.
 *
 * Return the number of group leaders after merging.
 */
static int group_common_shared_memory_tile(struct cuda_gen *gen,
	struct cuda_array_info *array, int n,
	struct cuda_array_ref_group **groups, int *leader, int n_group)
{
	int i, j;
	isl_ctx *ctx = isl_space_get_ctx(array->dim);

	for (i = 0; n_group > 1 && i < n; ++i) {
		int l = i;
		if (leader[i] != i)
			continue;
		if (!groups[i]->shared_bound)
			continue;
		for (j = i - 1; j >= 0; --j) {
			isl_map *map;
			int empty;
			struct cuda_array_bound *shared_bound;

			if (leader[j] != j)
				continue;
			if (!groups[j]->shared_bound)
				continue;

			map = isl_map_intersect(isl_map_copy(groups[l]->access),
					    isl_map_copy(groups[j]->access));
			empty = isl_map_is_empty(map);
			isl_map_free(map);

			if (empty)
				continue;

			map = isl_map_union(isl_map_copy(groups[l]->access),
					    isl_map_copy(groups[j]->access));
			shared_bound = create_bound_list(ctx, array->n_index);
			if (!can_tile_for_shared_memory(gen, array, map,
							shared_bound)) {
				isl_map_free(map);
				free_bound_list(shared_bound, array->n_index);
				continue;
			}

			free_bound_list(groups[j]->shared_bound,
					array->n_index);
			groups[j]->shared_bound = shared_bound;
			isl_map_free(groups[j]->access);
			groups[j]->access = map;
			groups[j]->n_ref += groups[l]->n_ref;
			l = leader[l] = j;
			n_group--;
		}
	}

	return n_group;
}
/* Extract an array of array reference groups from the array of references
 * and the grouping information in "leader".
 *
 * Store the results in array->n_group and array->groups.
 */
static void extract_array_groups(isl_ctx *ctx, struct cuda_array_info *array,
	int n, struct cuda_array_ref_group **groups, int *leader, int n_group)
{
	int i, j;

	for (i = 2; i < n; ++i)
		leader[i] = leader[leader[i]];

	array->n_group = n_group;
	array->groups = isl_alloc_array(ctx, struct cuda_array_ref_group *,
					n_group);
	assert(array->groups);

	j = 0;
	for (i = 0; i < n; ++i) {
		int k, l;
		struct cuda_stmt_access **refs;

		if (leader[i] != i) {
			groups[i]->refs = NULL;
			free_array_ref_group(groups[i], array->n_index);
			continue;
		}

		refs = isl_alloc_array(ctx, struct cuda_stmt_access *,
					groups[i]->n_ref);
		assert(refs);
		l = 0;
		for (k = i; k < n; ++k)
			if (leader[k] == i) {
				refs[l++] = *groups[k]->refs;
				(*groups[k]->refs)->group = j;
			}

		groups[i]->refs = refs;
		groups[i]->nr = j;
		array->groups[j++] = groups[i];
	}
}
/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 * the two groups.
 *
 * During the construction the group->refs field points to a single
 * array reference inside the array of array references, while
 * group->n_ref contains the number of elements in leader that
 * (directly or indirectly) point to this group, provided the group
 * is a leader.
 */
static void group_array_references(struct cuda_gen *gen,
	struct cuda_array_info *array, __isl_keep isl_union_map *sched)
{
	int i;
	int n, n_group;
	isl_ctx *ctx = isl_union_map_get_ctx(sched);
	struct cuda_array_ref_group **groups;
	int *leader;

	groups = isl_calloc_array(ctx, struct cuda_array_ref_group *,
					array->n_ref);
	assert(groups);

	n = populate_array_references(gen, array, sched, groups);

	leader = isl_alloc_array(ctx, int, n);
	assert(leader);

	n_group = group_overlapping_writes(n, groups, leader);

	for (i = 0; i < n; ++i)
		if (leader[i] == i)
			compute_group_shared_bound(gen, array, groups[i]);

	n_group = group_common_shared_memory_tile(gen, array, n, groups,
						leader, n_group);

	extract_array_groups(ctx, array, n, groups, leader, n_group);

	free(leader);
	free(groups);
}
/* Take tiled_sched, project it onto the shared tile loops and
 * the loops that will be wrapped over the threads,
 * parametrize the shared tile loops and store the result in gen->shared_sched.
 * The position of the first of these parameters is stored in gen->first_shared.
 * Also compute a projection that projects out the loops that will be
 * wrapped over the threads and store this projection in gen->shared_proj.
 */
static void compute_shared_sched(struct cuda_gen *gen)
{
	isl_space *dim;
	isl_map *proj;
	isl_set *par;
	isl_union_map *sched;

	sched = isl_union_map_copy(gen->tiled_sched);

	dim = isl_union_map_get_space(sched);
	gen->first_shared = isl_space_dim(dim, isl_dim_param);
	proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
	sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));

	dim = isl_union_map_get_space(sched);
	par = parametrization(dim, gen->shared_len + gen->n_block,
				0, gen->shared_len, "g");
	sched = isl_union_map_intersect_range(sched,
						isl_union_set_from_set(par));

	dim = isl_union_map_get_space(sched);
	proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);

	gen->shared_sched = sched;
	gen->shared_proj = isl_union_map_from_map(proj);
}
/* Group references of all arrays in the program.
 */
static void group_references(struct cuda_gen *gen)
{
	int i;
	isl_union_map *sched;

	sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
					isl_union_map_copy(gen->shared_proj));

	for (i = 0; i < gen->n_array; ++i)
		group_array_references(gen, &gen->array[i], sched);

	isl_union_map_free(sched);
}
/* Free all array information that is local to the current kernel.
 */
static void free_local_array_info(struct cuda_gen *gen)
{
	int i, j;

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		for (j = 0; j < array->n_group; ++j)
			free_array_ref_group(array->groups[j], array->n_index);
		free(array->groups);

		if (array->n_group == 0)
			continue;
		for (j = 0; j < gen->array[i].n_index; ++j) {
			isl_pw_aff_free(gen->array[i].local_bound[j]);
			gen->array[i].local_bound[j] = NULL;
		}
	}
}
static void print_iterator_list(FILE *out, int len, const char *prefix,
	int parens)
{
	int i;

	fprintf(out, "(");
	for (i = 0; i < len; ++i) {
		if (i)
			fprintf(out, ", ");
		if (parens)
			fprintf(out, "(%s%d)", prefix, i);
		else
			fprintf(out, "%s%d", prefix, i);
	}
	fprintf(out, ")");
}
/* Print an access to the element in the global memory copy of the
 * given array that corresponds to element [a0][a1]... of the original array.
 * The copy in global memory has been linearized, so we need to take
 * the array size into account.
 */
static void print_global_index(isl_ctx *ctx, FILE *out,
	struct cuda_array_info *array)
{
	int i;
	isl_printer *prn;

	if (cuda_array_is_scalar(array)) {
		fprintf(out, "*%s", array->name);
		return;
	}

	fprintf(out, "%s[", array->name);
	for (i = 0; i + 1 < array->n_index; ++i)
		fprintf(out, "(");
	for (i = 0; i < array->n_index; ++i) {
		if (i) {
			prn = isl_printer_to_file(ctx, out);
			prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
			prn = isl_printer_print_str(prn, ") * (");
			prn = isl_printer_print_pw_aff(prn,
							array->local_bound[i]);
			prn = isl_printer_print_str(prn, ") + ");
			isl_printer_free(prn);
		}
		fprintf(out, "a%d", i);
	}
	fprintf(out, "]");
}
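
/* Illustration (hypothetical two-dimensional array "A" with inner
 * bound M): the linearized access printed above would look like
 *
 *	A[(a0) * (M) + a1]
 */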
/* Print an access to the element in the shared memory copy of the
 * given array that corresponds to element [a0][a1]... of the original array.
 * Since the array in shared memory is just a shifted copy of part
 * of the original array, we simply need to subtract the lower bound,
 * which was computed in can_tile_for_shared_memory.
 * If any of the indices is strided, then we first add
 * shared_bound[i].shift and divide by shared_bound[i].stride.
 */
static void print_local_index(FILE *out, struct cuda_array_ref_group *group)
{
	int i;
	isl_ctx *ctx;
	isl_printer *prn;
	struct cuda_array_bound *bounds = group->shared_bound;

	ctx = isl_space_get_ctx(group->array->dim);
	print_array_name(out, group);
	for (i = 0; i < group->array->n_index; ++i) {
		fprintf(out, "[(a%d", i);
		if (bounds[i].shift) {
			fprintf(out, " + (");
			prn = isl_printer_to_file(ctx, out);
			prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
			prn = isl_printer_print_qpolynomial(prn,
							bounds[i].shift);
			prn = isl_printer_print_str(prn, "))/");
			prn = isl_printer_print_isl_int(prn,
							bounds[i].stride);
			isl_printer_free(prn);
		} else
			fprintf(out, ")");
		fprintf(out, " - (");
		prn = isl_printer_to_file(ctx, out);
		prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
		prn = isl_printer_print_aff(prn, bounds[i].lb);
		isl_printer_free(prn);
		fprintf(out, ")]");
	}
}
/* Print '#define's for copying data from global memory to shared
 * memory and back for the given array.
 */
static void print_array_copy_defines(struct cuda_gen *gen,
	struct cuda_array_ref_group *group)
{
	int i;
	const char *type[] = { "read", "write" };
	struct cuda_array_info *array = group->array;
	int n_index = array->n_index;

	for (i = 0; i < 2; ++i) {
		fprintf(gen->cuda.kernel_c, "#define %s_", type[i]);
		print_array_name(gen->cuda.kernel_c, group);
		print_iterator_list(gen->cuda.kernel_c, n_index, "a", 0);
		fprintf(gen->cuda.kernel_c, " %s_", type[i]);
		print_array_name(gen->cuda.kernel_c, group);
		fprintf(gen->cuda.kernel_c, "_");
		print_iterator_list(gen->cuda.kernel_c, n_index, "a", 1);
		fprintf(gen->cuda.kernel_c, "\n");

		fprintf(gen->cuda.kernel_c, "#define %s_", type[i]);
		print_array_name(gen->cuda.kernel_c, group);
		fprintf(gen->cuda.kernel_c, "_");
		print_iterator_list(gen->cuda.kernel_c, n_index, "a", 0);
		if (i) {
			fprintf(gen->cuda.kernel_c, " ");
			print_global_index(gen->ctx, gen->cuda.kernel_c, array);
			fprintf(gen->cuda.kernel_c, " = ");
			print_local_index(gen->cuda.kernel_c, group);
		} else {
			fprintf(gen->cuda.kernel_c, " ");
			print_local_index(gen->cuda.kernel_c, group);
			fprintf(gen->cuda.kernel_c, " = ");
			print_global_index(gen->ctx, gen->cuda.kernel_c, array);
		}
		fprintf(gen->cuda.kernel_c, "\n");
	}
}
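
/* Illustration (hypothetical two-dimensional array "A" with a single
 * reference group, inner bound M and lower bounds lb0 and lb1): the
 * macros printed above for the read direction would look like
 *
 *	#define read_shared_A(a0, a1) read_shared_A_((a0), (a1))
 *	#define read_shared_A_(a0, a1) shared_A[(a0) - (lb0)][(a1) - (lb1)] = A[(a0) * (M) + a1]
 *
 * and similarly, with the sides of the assignment swapped, for writes.
 */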
static void print_copy_defines(struct cuda_gen *gen)
{
	int i, j;

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		for (j = 0; j < array->n_group; ++j) {
			if (array->groups[j]->private_bound)
				continue;
			if (!array->groups[j]->shared_bound)
				continue;
			print_array_copy_defines(gen, array->groups[j]);
		}
	}
}
/* The sizes of the arrays on the host that have been computed by
 * extract_array_info may depend on the parameters.  Use the extra
 * constraints on the parameters that are valid at "host_domain"
 * to simplify these expressions.
 */
static void localize_bounds(struct cuda_gen *gen,
	__isl_keep isl_set *host_domain)
{
	int i, j;
	isl_set *context;

	context = isl_set_copy(host_domain);
	context = isl_set_params(context);

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		if (array->n_group == 0)
			continue;

		for (j = 0; j < array->n_index; ++j) {
			isl_pw_aff *pwaff;

			pwaff = isl_pw_aff_copy(array->bound[j]);
			pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context));
			array->local_bound[j] = pwaff;
		}
	}
	isl_set_free(context);
}
/* Set gen->tile_len and gen->n_parallel to those of the first statement
 * in the statement list u.
 * Because of the way the schedule is constructed, the other statements
 * in the list, if any, should have the same values for these properties.
 */
static void set_tile_len(struct cuda_gen *gen, struct clast_user_stmt *u)
{
	int nr;
	struct cuda_stmt *stmt;

	nr = atoi(u->statement->name + 2);
	stmt = &gen->stmts[nr];

	gen->tile_len = stmt->tile_len;
	gen->n_parallel = stmt->n_parallel;
}
/* This function is called for each leaf in the clast of the host code.
 * We first specialize the schedule to the site of the leaf, compute
 * the size of shared memory and then print the body of host code
 * and the associated kernel (through a call to print_kernel_body).
 */
static void print_host_user(struct gpucode_info *code,
	struct clast_user_stmt *u)
{
	struct cuda_gen *gen = code->user;
	isl_space *dim;
	isl_set *par;
	isl_set *host_domain;
	isl_union_map *access;
	isl_union_map *local_sched;
	isl_union_set *arrays;

	set_tile_len(gen, u);
	read_sizes(gen);

	host_domain = extract_entire_host_domain(u);

	local_sched = isl_union_map_intersect_range(
		    isl_union_map_copy(gen->sched),
		    isl_union_set_from_set(extend(isl_set_copy(host_domain),
						  gen->untiled_len)));
	access = isl_union_map_union(isl_union_map_copy(gen->read),
				     isl_union_map_copy(gen->write));
	access = isl_union_map_apply_domain(access,
					isl_union_map_copy(local_sched));
	arrays = isl_union_map_range(access);

	print_indent(code->dst, code->indent);
	fprintf(code->dst, "dim3 k%d_dimBlock(", gen->kernel_id);
	print_reverse_list(code->dst, gen->n_block, gen->block_dim);
	fprintf(code->dst, ");\n");

	print_indent(code->dst, code->indent);
	fprintf(code->dst, "dim3 k%d_dimGrid(", gen->kernel_id);
	print_reverse_list(code->dst, gen->n_grid, gen->grid_dim);
	fprintf(code->dst, ");\n");

	gen->tiled_sched = tile_schedule(gen, local_sched);
	gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
	gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);

	gen->local_sched = isl_union_map_copy(gen->tiled_sched);

	dim = isl_union_map_get_space(gen->local_sched);
	par = parametrization(dim, gen->tiled_len, 0, gen->shared_len, "g");
	gen->local_sched = isl_union_map_intersect_range(gen->local_sched,
						isl_union_set_from_set(par));

	gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
	gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);

	gen->private_access = NULL;
	compute_shared_sched(gen);
	gen->privatization = compute_privatization(gen);
	group_references(gen);
	compute_private_size(gen);
	localize_bounds(gen, host_domain);

	gen->local_sched = interchange_for_unroll(gen, gen->local_sched);

	print_copy_defines(gen);
	print_kernel_launch(gen, arrays);

	fprintf(gen->cuda.kernel_c, "{\n");

	print_kernel_body(gen, host_domain, gen->tiled_sched);

	fprintf(gen->cuda.kernel_c, "}\n");

	free_local_array_info(gen);
	isl_map_free(gen->privatization);
	isl_union_map_free(gen->private_access);
	isl_union_map_free(gen->local_sched);
	isl_union_map_free(gen->tiled_sched);
	isl_union_map_free(gen->shared_sched);
	isl_union_map_free(gen->shared_proj);
	isl_union_set_free(arrays);
	isl_set_free(host_domain);

	free(gen->tile_size);
	gen->kernel_id++;
}
/* Use CLooG to generate code for the outer gen->tile_first loops
 * of the global schedule in gen->sched.
 * The pretty printing of this code is handled by gpu_print_host_stmt,
 * which calls print_host_user for each kernel invocation location.
 */
static void print_cloog_host_code(struct cuda_gen *gen)
{
	int i;
	isl_set *context;
	isl_union_map *sched;
	CloogOptions *options;
	CloogDomain *cloog_context;
	CloogUnionDomain *ud;
	CloogInput *input;
	struct clast_stmt *stmt;
	char name[20];

	options = cloog_options_malloc(gen->state);
	options->language = CLOOG_LANGUAGE_C;
	options->strides = 1;
	options->stop = gen->tile_first;
	options->f = gen->untiled_len;
	options->l = gen->untiled_len;
	options->save_domains = 1;
	options->noscalars = 1;

	sched = isl_union_map_copy(gen->sched);
	ud = cloog_union_domain_from_isl_union_map(sched);
	for (i = 0; i < options->stop; ++i) {
		snprintf(name, sizeof(name), "h%d", i);
		ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
	}
	context = isl_set_copy(gen->context);
	cloog_context = cloog_domain_from_isl_set(context);
	input = cloog_input_alloc(cloog_context, ud);

	stmt = cloog_clast_create_from_input(input, options);

	gen->code.indent = 0;
	gen->code.dst = gen->cuda.host_c;
	gen->code.print_user_stmt = NULL;
	gen->code.print_user_stmt_list = &print_host_user;
	gen->code.print_for_head = NULL;
	gen->code.print_for_foot = NULL;
	gen->code.user = gen;
	gpu_print_host_stmt(&gen->code, stmt);

	cloog_clast_free(stmt);
	cloog_options_free(options);
	fprintf(gen->cuda.host_c, "\n");
}
void print_cuda_macros(struct cuda_gen *gen)
{
	const char *macros =
		"#define cudaCheckReturn(ret) assert((ret) == cudaSuccess)\n"
		"#define cudaCheckKernel()"
		" assert(cudaGetLastError() == cudaSuccess)\n\n";
	fputs(macros, gen->cuda.host_c);
}
void print_host_code(struct cuda_gen *gen)
{
	fprintf(gen->cuda.host_c, "{\n");
	print_cloog_macros(gen->cuda.host_c);
	print_cloog_macros(gen->cuda.kernel_c);

	print_cuda_macros(gen);

	declare_device_arrays(gen);

	allocate_device_arrays(gen);
	copy_arrays_to_device(gen);

	gen->kernel_id = 0;
	print_cloog_host_code(gen);

	copy_arrays_from_device(gen);
	free_device_arrays(gen);

	fprintf(gen->cuda.host_c, "}\n");
}
__isl_give isl_set *add_context_from_str(__isl_take isl_set *set,
	const char *str)
{
	isl_ctx *ctx;
	isl_set *context;

	if (!str)
		return set;

	ctx = isl_set_get_ctx(set);
	context = isl_set_read_from_str(ctx, str);
	context = isl_set_align_params(context, isl_set_get_space(set));
	set = isl_set_intersect(set, context);

	return set;
}
/* Return the union of all iteration domains of the gen->stmts[i].
 */
static __isl_give isl_union_set *extract_domain(struct cuda_gen *gen)
{
	int i;
	isl_union_set *domain;

	domain = isl_union_set_empty(isl_set_get_space(gen->context));
	for (i = 0; i < gen->n_stmts; ++i) {
		isl_set *domain_i;

		domain_i = isl_set_copy(gen->stmts[i].domain);
		domain = isl_union_set_union(domain,
				isl_union_set_from_set(domain_i));
	}

	return domain;
}
/* Information about the outermost tilable bands in the forest of bands.
 *
 * tile_len and n_parallel are only set on band_info structures
 * that correspond to outermost bands.  For other bands (in particular,
 * ancestors of the outermost bands), n_parallel is set to 0.
 *
 * prefix is the (padded) schedule leading up to the outermost tilable bands.
 *
 * tile_first is the number of schedule dimensions in prefix.
 *
 * suffix is the schedule of the outermost tilable bands and their descendants.
 */
struct band_info {
	struct cuda_gen *gen;
	int tile_first;
	int tile_len;
	int n_parallel;
	isl_union_map *prefix;
	isl_union_map *suffix;
};
/* Set tile_len and n_parallel of the statement to that of
 * their outermost band, recorded in the band_info.
 */
static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
{
	struct band_info *info = user;
	int nr;
	struct cuda_stmt *stmt;

	nr = atoi(isl_map_get_tuple_name(map, isl_dim_in) + 2);
	stmt = &info->gen->stmts[nr];

	stmt->tile_len = info->tile_len;
	stmt->n_parallel = info->n_parallel;

	isl_map_free(map);

	return 0;
}

static void list_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band_list *list, int pos, struct band_info *list_info);
/* Check if this band has any parallel loops.  If so, take it as
 * the outermost tilable band.  If not, continue looking for the
 * outermost tilable band in the children of the current band.
 */
static void band_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band *band, int pos, struct band_info *info)
{
	int n = isl_band_n_member(band);
	int n_parallel;

	for (n_parallel = 0; n_parallel < n; ++n_parallel)
		if (!isl_band_member_is_zero_distance(band, n_parallel))
			break;

	info->n_parallel = n_parallel;
	if (n_parallel) {
		info->gen = gen;
		info->tile_first = pos;
		info->tile_len = n;
		info->prefix = isl_band_get_prefix_schedule(band);
		info->suffix = isl_union_map_flat_range_product(
				isl_band_get_partial_schedule(band),
				isl_band_get_suffix_schedule(band));
		isl_union_map_foreach_map(info->prefix,
					    &set_stmt_tile_len, info);
	} else {
		isl_band_list *children;
		if (!isl_band_has_children(band))
			isl_die(isl_band_get_ctx(band), isl_error_unknown,
				"unable to detect any parallelism", abort());
		children = isl_band_get_children(band);
		list_select_outer_band(gen, children, pos + n, info);
	}

	isl_band_free(band);
}
/* Comparison function that returns a non-zero value for band_infos
 * with different tile_len fields or different n_parallel fields.
 */
static int cmp_band(const void *p1, const void *p2)
{
	const struct band_info *info1 = p1;
	const struct band_info *info2 = p2;

	if (info1->tile_len != info2->tile_len)
		return info1->tile_len - info2->tile_len;

	return info1->n_parallel - info2->n_parallel;
}
/* Extend "umap" with coordinates with fixed value "val"
 * to a total length of "dst_len", assuming the original dimension is "src_len".
 */
static __isl_give isl_union_map *extend_range(__isl_take isl_union_map *umap,
	int src_len, int dst_len, int val)
{
	isl_space *dim;
	isl_map *map;
	int i;

	dim = isl_union_map_get_space(umap);
	map = isl_map_reverse(projection(dim, dst_len, src_len));
	for (i = src_len; i < dst_len; ++i)
		map = isl_map_fix_si(map, isl_dim_out, i, val);

	umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));

	return umap;
}
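
/* Illustration (not part of the original code): extending from
 * src_len = 2 to dst_len = 3 with val = 0 composes the ranges with
 *
 *	{ [i0, i1] -> [i0, i1, 0] }
 */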
/* Group bands with the same values for tile_len and n_parallel.
 * The prefix schedule is then extended with a fixed coordinate that
 * is different for each such group.
 * Note that the actual values for this coordinate are not important.
 * The bands have already been effectively separated at a higher level
 * or they are independent and may be executed in parallel.
 * The list of band_info has been sorted before this function is called.
 */
static void separate_bands(struct band_info *info, int n)
{
	int i;
	int j = 0;

	for (i = 0; i < n; ++i) {
		int l = info[i].tile_first;

		if (i &&
		    (info[i].tile_len != info[i - 1].tile_len ||
		     info[i].n_parallel != info[i - 1].n_parallel))
			j++;

		info[i].prefix = extend_range(info[i].prefix,
						l, l + 1, j);
		info[i].tile_first = l + 1;
	}
}
/* Select the outermost bands in the elements of the list, align
 * their prefix schedules, separate bands with different values
 * for tile_len and/or n_parallel and then combine the resulting
 * prefix and suffix schedules into a single pair of prefix and
 * suffix schedules for the entire list.
 */
static void list_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band_list *list, int pos, struct band_info *list_info)
{
	isl_band *band;
	int i;
	int n = isl_band_list_n_band(list);
	isl_ctx *ctx = isl_band_list_get_ctx(list);
	struct band_info *info;
	int max_tile_first;
	isl_union_map *prefix;
	isl_union_map *suffix;

	assert(n >= 1);
	info = isl_calloc_array(ctx, struct band_info, n);
	assert(info);

	max_tile_first = 0;
	for (i = 0; i < n; ++i) {
		band = isl_band_list_get_band(list, i);
		band_select_outer_band(gen, band, pos, &info[i]);
		if (info[i].tile_first > max_tile_first)
			max_tile_first = info[i].tile_first;
	}

	for (i = 0; i < n; ++i) {
		if (info[i].tile_first == max_tile_first)
			continue;
		info[i].prefix = extend_range(info[i].prefix,
					info[i].tile_first, max_tile_first, 0);
		info[i].tile_first = max_tile_first;
	}

	qsort(info, n, sizeof(struct band_info), &cmp_band);

	for (i = 0; i < n - 1; ++i)
		if (info[i].tile_len != info[i + 1].tile_len ||
		    info[i].n_parallel != info[i + 1].n_parallel)
			break;

	if (i < n - 1)
		separate_bands(info, n);

	prefix = info[0].prefix;
	suffix = info[0].suffix;

	for (i = 1; i < n; ++i) {
		prefix = isl_union_map_union(prefix, info[i].prefix);
		suffix = isl_union_map_union(suffix, info[i].suffix);
	}

	list_info->tile_first = info[0].tile_first;
	list_info->tile_len = -1;
	list_info->prefix = prefix;
	list_info->suffix = suffix;

	isl_band_list_free(list);
	free(info);
}
/* Set max_out to the maximal number of output dimensions over
 * all maps.
 */
static int update_max_out(__isl_take isl_map *map, void *user)
{
	int *max_out = user;
	int n_out = isl_map_dim(map, isl_dim_out);

	if (n_out > *max_out)
		*max_out = n_out;

	isl_map_free(map);
	return 0;
}
{
4052 /* Extend the dimension of the range of the given map to data->max_out and
4053 * then add the result to data->res.
4055 static int map_align_range(__isl_take isl_map
*map
, void *user
)
4057 struct align_range_data
*data
= user
;
4061 int n_out
= isl_map_dim(map
, isl_dim_out
);
4063 dim
= isl_union_map_get_space(data
->res
);
4064 proj
= isl_map_reverse(projection(dim
, data
->max_out
, n_out
));
4065 for (i
= n_out
; i
< data
->max_out
; ++i
)
4066 proj
= isl_map_fix_si(proj
, isl_dim_out
, i
, 0);
4068 map
= isl_map_apply_range(map
, proj
);
4070 data
->res
= isl_union_map_add_map(data
->res
, map
);
/* Extend the ranges of the maps in the union map such that they all have
 * the same dimension.
 */
static __isl_give isl_union_map *align_range(__isl_take isl_union_map *umap)
{
	struct align_range_data data;

	data.max_out = 0;
	isl_union_map_foreach_map(umap, &update_max_out, &data.max_out);

	data.res = isl_union_map_empty(isl_union_map_get_space(umap));
	isl_union_map_foreach_map(umap, &map_align_range, &data);

	isl_union_map_free(umap);

	return data.res;
}
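/* Example (added for exposition): applied to the union map
 *
 *	{ S[i] -> [i]; T[i, j] -> [i, j] }
 *
 * the maximal range dimension is 2, so align_range returns
 *
 *	{ S[i] -> [i, 0]; T[i, j] -> [i, j] }
 */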
/* Select the outermost tilable band that (by construction)
 * has at least one parallel loop.
 * The starting position of the aligned band is stored in gen->tile_first.
 * The sizes and number of parallel loops may be different in different
 * parts of the band forest and are therefore stored in the cuda_stmts.
 *
 * Return the complete schedule, with the tilable bands aligned
 * at gen->tile_first and padded with zero, if needed.
 */
static __isl_give isl_union_map *select_outer_tilable_band(struct cuda_gen *gen,
	__isl_keep isl_schedule *schedule)
{
	isl_band_list *list;
	struct band_info info;

	gen->n_parallel = 0;
	gen->tile_len = -1;

	list = isl_schedule_get_band_forest(schedule);

	list_select_outer_band(gen, list, 0, &info);

	gen->tile_first = info.tile_first;
	info.suffix = align_range(info.suffix);

	return isl_union_map_flat_range_product(info.prefix, info.suffix);
}
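/* Illustration (added): isl_union_map_flat_range_product concatenates
 * the ranges of the two schedules.  E.g., a prefix { S[i, j] -> [i] }
 * and an aligned suffix { S[i, j] -> [j, 0] } combine into
 *
 *	{ S[i, j] -> [i, j, 0] }
 */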
/* Set gen->untiled_len to the number of scheduling dimensions
 * for the schedule of the first domain.
 * We assume here that this number is the same for all domains.
 */
static int set_untiled_len(__isl_take isl_map *map, void *user)
{
	unsigned *untiled_len = user;

	*untiled_len = isl_map_dim(map, isl_dim_out);

	isl_map_free(map);
	return -1;
}
/* Compute an appropriate schedule based on the accesses in
 * gen->read and gen->write.
 *
 * We first compute dependences and then use those to compute
 * a schedule that has a parallel loop in each tilable band.
 * Finally, we select the outermost tilable band.
 */
static void compute_schedule(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_ctx *ctx = isl_union_map_get_ctx(sched);
	isl_union_set *domain;
	isl_union_map *empty;
	isl_union_map *dep_raw, *dep2, *dep3, *dep;
	isl_union_map *uninitialized;
	isl_schedule *schedule;
	struct isl_options *options;

	empty = isl_union_map_empty(isl_union_map_get_space(sched));

	isl_union_map_compute_flow(isl_union_map_copy(gen->read),
				isl_union_map_copy(gen->write), empty,
				isl_union_map_copy(sched),
				&dep_raw, NULL, &uninitialized, NULL);
	isl_union_map_compute_flow(isl_union_map_copy(gen->write),
				isl_union_map_copy(gen->write),
				isl_union_map_copy(gen->read),
				isl_union_map_copy(sched),
				&dep2, &dep3, NULL, NULL);
	isl_union_map_free(sched);

	gen->copy_in = isl_union_map_range(uninitialized);

	dep = isl_union_map_union(dep2, dep3);
	dep = isl_union_map_union(dep, dep_raw);
	dep = isl_union_map_coalesce(dep);

	domain = extract_domain(gen);
	options = isl_ctx_peek_options(ctx, isl_options_arg);
	options->schedule_outer_zero_distance = 1;

	schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
				isl_union_map_copy(dep), dep);

	sched = select_outer_tilable_band(gen, schedule);

	isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
	sched = isl_union_map_intersect_domain(sched, domain);
	gen->sched = sched;

	isl_schedule_free(schedule);
}
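/* Example (added for exposition): for a scop containing
 *
 *	S: a[i] = f(i);			0 <= i < n
 *	T: b[i] = a[i] + c[i];		0 <= i < n
 *
 * scheduled with S before T, the flow analysis above produces the
 * read-after-write dependence dep_raw = { S[i] -> T[i] }, while the
 * reads of c, which is never written inside the scop, end up in
 * "uninitialized", so gen->copy_in contains the elements c[i].
 */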
/* Extract a cuda_stmt_access from "expr", append it to the list
 * that ends in *next_access and return the updated end of the list.
 */
static struct cuda_stmt_access **expr_extract_access(struct pet_expr *expr,
	struct cuda_stmt_access **next_access)
{
	struct cuda_stmt_access *access;
	isl_ctx *ctx = isl_map_get_ctx(expr->acc.access);

	access = isl_alloc_type(ctx, struct cuda_stmt_access);
	assert(access);
	access->next = NULL;
	access->read = expr->acc.read;
	access->write = expr->acc.write;
	access->access = isl_map_copy(expr->acc.access);

	*next_access = access;
	next_access = &(*next_access)->next;

	return next_access;
}
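/* Illustration (added): the returned pointer always refers to the "next"
 * field of the last list element, so successive calls append in order:
 *
 *	struct cuda_stmt_access *list = NULL;
 *	struct cuda_stmt_access **tail = &list;
 *	tail = expr_extract_access(e1, tail);	// list: e1
 *	tail = expr_extract_access(e2, tail);	// list: e1 -> e2
 *
 * where e1 and e2 stand for arbitrary access expressions.
 */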
/* Walk "expr" recursively and append all access subexpressions
 * to the list that ends in *next_access.
 */
static struct cuda_stmt_access **expr_extract_accesses(struct pet_expr *expr,
	struct cuda_stmt_access **next_access)
{
	int i;

	for (i = 0; i < expr->n_arg; ++i)
		next_access = expr_extract_accesses(expr->args[i],
							next_access);

	if (expr->type == pet_expr_access)
		next_access = expr_extract_access(expr, next_access);

	return next_access;
}
/* Collect all accesses in the body of "stmt" into stmt->accesses.
 */
static void pet_stmt_extract_accesses(struct cuda_stmt *stmt)
{
	struct cuda_stmt_access **next_access = &stmt->accesses;

	stmt->accesses = NULL;
	expr_extract_accesses(stmt->body, next_access);
}
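/* Example (added for exposition): for a statement body
 *
 *	a[i] = b[i] + b[i + 1];
 *
 * the traversal visits the arguments of each expression before the
 * expression itself, so (assuming pet places the left-hand side in the
 * first argument of the assignment) stmt->accesses lists the write to
 * a[i] followed by the reads of b[i] and b[i + 1].
 */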
/* Return an array of cuda_stmt representing the statements in "scop".
 */
static struct cuda_stmt *extract_stmts(isl_ctx *ctx, struct pet_scop *scop,
	__isl_keep isl_set *context)
{
	int i;
	struct cuda_stmt *stmts;

	stmts = isl_calloc_array(ctx, struct cuda_stmt, scop->n_stmt);
	assert(stmts);

	for (i = 0; i < scop->n_stmt; ++i) {
		struct cuda_stmt *s = &stmts[i];

		s->domain = isl_set_copy(scop->stmts[i]->domain);
		s->domain = isl_set_intersect_params(s->domain,
						isl_set_copy(context));
		s->body = scop->stmts[i]->body;
		pet_stmt_extract_accesses(s);
	}

	return stmts;
}
/* Replace the scop in the "input" file by equivalent code
 * that uses the GPU. "scop" is assumed to correspond to this scop.
 *
 * We first compute a schedule that respects the dependences
 * of the original program and select the outermost band
 * of tilable dimensions that has at least one parallel loop.
 * We then have three blocks of dimensions
 *
 *	H		B		G
 *
 * The tilable band "B" is first tiled according to "tile.sizes", resulting
 * in
 *
 *	H	T		P		G
 *
 * For each iteration of the T loop and for each array, we compute
 * the array elements accessed by that iteration, construct a rectangular
 * box around it and shift it to the origin. The result is used
 * as shared memory for the array.
 *
 * We then split off at most 2 parallel loops from the T loops and
 * at most 3 parallel loops from the P loops
 *
 *	H	T1	T2	P1	P2	G
 *
 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
 * according to "grid.sizes"/"block.sizes".
 *
 *	H	T1T T1P	T2	P1T P1P	P2	G
 *
 * Finally, the T1P and P1P iterators are equated to the block and
 * thread dimensions respectively and so are effectively removed.
 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
 * are run on the GPU.
 *
 * Code is generated in three stages. We first generate code for the
 * host (the H loops), with iterators h%d. Then, for each leaf node
 * of the resulting AST, we generate code for the shared loops (up to
 * and including T2), with iterators g%d and after equating the H loops
 * to h%d parameters and the T1P loops to the block dimensions.
 * Finally, we generate code for the remaining loops in a similar fashion.
 */
int cuda_pet(isl_ctx *ctx, struct pet_scop *scop, struct ppcg_options *options,
	const char *input)
{
	isl_union_map *sched;
	struct cuda_gen gen;

	if (!scop)
		return -1;

	scop = pet_scop_align_params(scop);

	gen.ctx = ctx;
	gen.context = isl_set_copy(scop->context);
	gen.context = add_context_from_str(gen.context, options->ctx);
	gen.n_stmts = scop->n_stmt;
	gen.stmts = extract_stmts(ctx, scop, gen.context);
	gen.read = pet_scop_collect_reads(scop);
	gen.write = pet_scop_collect_writes(scop);
	gen.options = options;
	gen.state = cloog_isl_state_malloc(gen.ctx);

	cuda_open_files(&gen.cuda, input);

	collect_array_info(&gen);

	sched = pet_scop_collect_schedule(scop);
	compute_schedule(&gen, sched);

	print_host_code(&gen);

	cloog_state_free(gen.state);
	clear_cuda_gen(&gen);

	cuda_close_files(&gen.cuda);

	return 0;
}
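/* A minimal driver sketch (added for exposition; not part of this file),
 * assuming pet is used to extract the scop and that a ppcg_options
 * instance "options" has been populated elsewhere:
 *
 *	isl_ctx *ctx = isl_ctx_alloc_with_pet_options();
 *	struct pet_scop *scop =
 *		pet_scop_extract_from_C_source(ctx, "input.c", NULL);
 *	cuda_pet(ctx, scop, &options, "input.c");
 *	pet_scop_free(scop);
 *	isl_ctx_free(ctx);
 */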