/*
 * Copyright 2010-2011 INRIA Saclay
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * 91893 Orsay, France
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <isl/polynomial.h>
#include <isl/union_set.h>
#include <isl/schedule.h>
#include <isl/options.h>
#include <cloog/isl/cloog.h>

#include "cuda_common.h"
#include "ppcg_options.h"
30 /* The fields stride, shift and shift_map only contain valid information
32 * If so, they express that current index is such that if you add shift,
33 * then the result is always a multiple of stride.
34 * shift_map contains the mapping
36 * i -> (i + shift)/stride
38 struct cuda_array_bound
{
44 isl_basic_map
*shift_map
;
47 struct cuda_array_info
;
49 /* A group of array references in a kernel that should be handled together.
50 * If private_bound is not NULL, then it is mapped to registers.
51 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
52 * Otherwise, it is accessed from global memory.
54 struct cuda_array_ref_group
{
55 /* The references in this group access this array. */
56 struct cuda_array_info
*array
;
57 /* Position of this group in the list of reference groups of array. */
60 /* The following fields are use during the construction of the groups.
61 * access is the combined access relation relative to the shared
63 * write is set if any access in the group is a write.
68 /* For each index, size and offset of piece in shared memory. */
69 struct cuda_array_bound
*shared_bound
;
71 /* For each index, size and offset of piece in private memory. */
72 struct cuda_array_bound
*private_bound
;
74 /* References in this group; point to elements of a linked list. */
76 struct cuda_stmt_access
**refs
;
79 struct cuda_array_info
{
83 /* Name of the array. */
85 /* Number of indices. */
87 /* For each index, a bound on the array in that direction. */
89 /* For each index, bound[i] specialized to the current kernel. */
90 isl_pw_aff
**local_bound
;
92 /* All references to this array; point to elements of a linked list. */
94 struct cuda_stmt_access
**refs
;
96 /* The reference groups associated to this array. */
98 struct cuda_array_ref_group
**groups
;
100 /* Last shared memory tile dimension that affects tile of this array. */
102 /* Dimension at which copying to/from shared memory is printed.
103 * if >= 0, then the value is >= last_shared
104 * if -1, then the copying is done at the leaf level.
106 int print_shared_level
;
109 /* Print the name of the local copy of a given group of array references.
111 static void print_array_name(FILE *out
, struct cuda_array_ref_group
*group
)
115 if (group
->private_bound
)
116 fprintf(out
, "private_");
117 else if (group
->shared_bound
)
118 fprintf(out
, "shared_");
121 fprintf(out
, "%s", group
->array
->name
);
122 if (!global
&& group
->array
->n_group
> 1)
123 fprintf(out
, "_%d", group
->nr
);
126 /* Collect all references to the given array and store pointers to them
129 static void collect_references(struct cuda_gen
*gen
,
130 struct cuda_array_info
*array
)
136 for (i
= 0; i
< gen
->n_stmts
; ++i
) {
137 struct cuda_stmt
*stmt
= &gen
->stmts
[i
];
138 struct cuda_stmt_access
*access
;
140 for (access
= stmt
->accesses
; access
; access
= access
->next
) {
142 name
= isl_map_get_tuple_name(access
->access
,
144 if (name
&& !strcmp(array
->name
, name
))
150 array
->refs
= isl_alloc_array(gen
->ctx
, struct cuda_stmt_access
*, n
);
154 for (i
= 0; i
< gen
->n_stmts
; ++i
) {
155 struct cuda_stmt
*stmt
= &gen
->stmts
[i
];
156 struct cuda_stmt_access
*access
;
158 for (access
= stmt
->accesses
; access
; access
= access
->next
) {
160 name
= isl_map_get_tuple_name(access
->access
,
162 if (!name
|| strcmp(array
->name
, name
))
165 array
->refs
[n
++] = access
;
170 static struct cuda_array_bound
*create_bound_list(isl_ctx
*ctx
, int n_index
)
173 struct cuda_array_bound
*bound
;
175 bound
= isl_alloc_array(ctx
, struct cuda_array_bound
, n_index
);
178 for (i
= 0; i
< n_index
; ++i
) {
179 isl_int_init(bound
[i
].size
);
181 isl_int_init(bound
[i
].stride
);
182 bound
[i
].shift
= NULL
;
183 bound
[i
].shift_map
= NULL
;
189 static void free_bound_list(struct cuda_array_bound
*bound
, int n_index
)
196 for (j
= 0; j
< n_index
; ++j
) {
197 isl_int_clear(bound
[j
].size
);
198 isl_int_clear(bound
[j
].stride
);
199 isl_aff_free(bound
[j
].lb
);
200 isl_aff_free(bound
[j
].shift
);
201 isl_basic_map_free(bound
[j
].shift_map
);
206 static struct pet_array
*find_array(struct pet_scop
*scop
,
207 __isl_keep isl_set
*accessed
)
212 id
= isl_set_get_tuple_id(accessed
);
214 for (i
= 0; i
< scop
->n_array
; ++i
) {
217 id_i
= isl_set_get_tuple_id(scop
->arrays
[i
]->extent
);
224 return i
< scop
->n_array
? scop
->arrays
[i
] : NULL
;
227 /* Compute bounds on the host arrays based on the accessed elements
228 * and collect all references to the array.
230 static int extract_array_info(__isl_take isl_set
*array
, void *user
)
233 struct cuda_gen
*gen
= (struct cuda_gen
*)user
;
237 isl_pw_aff
**local_bounds
;
238 struct pet_array
*pa
;
240 n_index
= isl_set_dim(array
, isl_dim_set
);
241 name
= isl_set_get_tuple_name(array
);
242 bounds
= isl_alloc_array(isl_set_get_ctx(array
),
243 isl_pw_aff
*, n_index
);
245 local_bounds
= isl_calloc_array(isl_set_get_ctx(array
),
246 isl_pw_aff
*, n_index
);
247 assert(local_bounds
);
248 gen
->array
[gen
->n_array
].dim
= isl_set_get_space(array
);
249 gen
->array
[gen
->n_array
].name
= strdup(name
);
250 gen
->array
[gen
->n_array
].n_index
= n_index
;
251 gen
->array
[gen
->n_array
].bound
= bounds
;
252 gen
->array
[gen
->n_array
].local_bound
= local_bounds
;
254 pa
= find_array(gen
->scop
, array
);
257 gen
->array
[gen
->n_array
].type
= strdup(pa
->element_type
);
259 for (i
= 0; i
< n_index
; ++i
) {
264 isl_set
*size
= i
== 0 ? array
: pa
->extent
;
266 bound
= isl_set_dim_max(isl_set_copy(size
), i
);
268 dom
= isl_pw_aff_domain(isl_pw_aff_copy(bound
));
269 ls
= isl_local_space_from_space(isl_set_get_space(dom
));
270 one
= isl_aff_zero_on_domain(ls
);
271 one
= isl_aff_add_constant_si(one
, 1);
272 bound
= isl_pw_aff_add(bound
, isl_pw_aff_alloc(dom
, one
));
273 bound
= isl_pw_aff_gist(bound
, isl_set_copy(gen
->context
));
278 collect_references(gen
, &gen
->array
[gen
->n_array
]);
286 void collect_array_info(struct cuda_gen
*gen
)
288 isl_union_set
*arrays
;
290 arrays
= isl_union_map_range(isl_union_map_copy(gen
->read
));
291 arrays
= isl_union_set_union(arrays
,
292 isl_union_map_range(isl_union_map_copy(gen
->write
)));
293 arrays
= isl_union_set_coalesce(arrays
);
295 gen
->n_array
= isl_union_set_n_set(arrays
);
296 gen
->array
= isl_alloc_array(gen
->ctx
,
297 struct cuda_array_info
, gen
->n_array
);
300 isl_union_set_foreach_set(arrays
, &extract_array_info
, gen
);
301 isl_union_set_free(arrays
);
304 static void free_array_info(struct cuda_gen
*gen
)
308 for (i
= 0; i
< gen
->n_array
; ++i
) {
309 int n_index
= gen
->array
[i
].n_index
;
310 free(gen
->array
[i
].type
);
311 free(gen
->array
[i
].name
);
312 for (j
= 0; j
< n_index
; ++j
) {
313 isl_pw_aff_free(gen
->array
[i
].bound
[j
]);
314 isl_pw_aff_free(gen
->array
[i
].local_bound
[j
]);
316 isl_space_free(gen
->array
[i
].dim
);
317 free(gen
->array
[i
].bound
);
318 free(gen
->array
[i
].local_bound
);
319 free(gen
->array
[i
].refs
);
324 static void declare_device_arrays(struct cuda_gen
*gen
)
328 for (i
= 0; i
< gen
->n_array
; ++i
)
329 fprintf(gen
->cuda
.host_c
, "%s *dev_%s;\n",
330 gen
->array
[i
].type
, gen
->array
[i
].name
);
331 fprintf(gen
->cuda
.host_c
, "\n");
334 static void print_array_size(struct cuda_gen
*gen
, FILE *out
,
335 struct cuda_array_info
*array
)
340 prn
= isl_printer_to_file(gen
->ctx
, out
);
341 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
342 for (i
= 0; i
< array
->n_index
; ++i
) {
343 prn
= isl_printer_print_str(prn
, "(");
344 prn
= isl_printer_print_pw_aff(prn
, array
->bound
[i
]);
345 prn
= isl_printer_print_str(prn
, ") * ");
347 prn
= isl_printer_print_str(prn
, "sizeof(");
348 prn
= isl_printer_print_str(prn
, array
->type
);
349 prn
= isl_printer_print_str(prn
, ")");
350 isl_printer_free(prn
);
353 static void allocate_device_arrays(struct cuda_gen
*gen
)
357 for (i
= 0; i
< gen
->n_array
; ++i
) {
358 fprintf(gen
->cuda
.host_c
,
359 "cudaCheckReturn(cudaMalloc((void **) &dev_%s, ",
361 print_array_size(gen
, gen
->cuda
.host_c
, &gen
->array
[i
]);
362 fprintf(gen
->cuda
.host_c
, "));\n");
364 fprintf(gen
->cuda
.host_c
, "\n");
367 static void free_device_arrays(struct cuda_gen
*gen
)
371 for (i
= 0; i
< gen
->n_array
; ++i
)
372 fprintf(gen
->cuda
.host_c
, "cudaCheckReturn(cudaFree(dev_%s));\n",
376 /* Check if a cuda array is a scalar. A scalar is a value that is not stored
377 * as an array or through a pointer reference, but as single data element. At
378 * the moment, scalars are represented as zero dimensional arrays.
380 static int cuda_array_is_scalar(struct cuda_array_info
*array
)
382 return (array
->n_index
== 0);
385 static void copy_arrays_to_device(struct cuda_gen
*gen
)
389 for (i
= 0; i
< gen
->n_array
; ++i
) {
394 dim
= isl_space_copy(gen
->array
[i
].dim
);
395 read_i
= isl_union_set_extract_set(gen
->copy_in
, dim
);
396 empty
= isl_set_fast_is_empty(read_i
);
397 isl_set_free(read_i
);
401 fprintf(gen
->cuda
.host_c
, "cudaCheckReturn(cudaMemcpy(dev_%s,",
404 if (cuda_array_is_scalar(&(gen
->array
[i
])))
405 fprintf(gen
->cuda
.host_c
, " &%s, ",
408 fprintf(gen
->cuda
.host_c
, " %s, ", gen
->array
[i
].name
);
410 print_array_size(gen
, gen
->cuda
.host_c
, &gen
->array
[i
]);
411 fprintf(gen
->cuda
.host_c
, ", cudaMemcpyHostToDevice));\n");
413 fprintf(gen
->cuda
.host_c
, "\n");
416 static void copy_arrays_from_device(struct cuda_gen
*gen
)
419 isl_union_set
*write
;
420 write
= isl_union_map_range(isl_union_map_copy(gen
->write
));
422 for (i
= 0; i
< gen
->n_array
; ++i
) {
427 dim
= isl_space_copy(gen
->array
[i
].dim
);
428 write_i
= isl_union_set_extract_set(write
, dim
);
429 empty
= isl_set_fast_is_empty(write_i
);
430 isl_set_free(write_i
);
434 fprintf(gen
->cuda
.host_c
, "cudaCheckReturn(cudaMemcpy(");
435 if (cuda_array_is_scalar(&gen
->array
[i
]))
436 fprintf(gen
->cuda
.host_c
, "&%s, ", gen
->array
[i
].name
);
438 fprintf(gen
->cuda
.host_c
, "%s, ", gen
->array
[i
].name
);
439 fprintf(gen
->cuda
.host_c
, "dev_%s, ", gen
->array
[i
].name
);
440 print_array_size(gen
, gen
->cuda
.host_c
, &gen
->array
[i
]);
441 fprintf(gen
->cuda
.host_c
, ", cudaMemcpyDeviceToHost));\n");
444 isl_union_set_free(write
);
445 fprintf(gen
->cuda
.host_c
, "\n");
/* Overwrite up to "len" entries of "sizes" with integers read from
 * "filename".  If the file does not exist, the defaults are kept.
 */
static void read_sizes_from_file(struct cuda_gen *gen, const char *filename,
	int *sizes, int len)
{
	int i;
	FILE *file;

	file = fopen(filename, "r");
	if (!file)
		return;

	for (i = 0; i < len; ++i)
		if (fscanf(file, "%d", &sizes[i]) < 1)
			break;

	fclose(file);
}
/* Reverse the "len" elements of "list" in place. */
static void reverse_list(int *list, int len)
{
	int i;
	int t;

	for (i = 0; 2 * i < len; ++i) {
		t = list[i];
		list[i] = list[len - 1 - i];
		list[len - 1 - i] = t;
	}
}
477 /* Read user specified sizes from "tile.sizes", "block.sizes" and "grid.sizes"
478 * after filling in some potentially useful defaults.
480 static void read_sizes(struct cuda_gen
*gen
)
484 gen
->tile_size
= isl_alloc_array(gen
->ctx
, int, gen
->tile_len
);
485 assert(gen
->tile_size
);
486 for (n
= 0; n
< gen
->tile_len
; ++n
)
487 gen
->tile_size
[n
] = gen
->options
->tile_size
;
488 read_sizes_from_file(gen
, "tile.sizes", gen
->tile_size
, gen
->tile_len
);
491 gen
->n_block
= (n
<= 3) ? n
: 3;
492 switch (gen
->n_block
) {
494 gen
->block_dim
[0] = 512;
497 gen
->block_dim
[0] = 32;
498 gen
->block_dim
[1] = 16;
501 gen
->block_dim
[0] = 32;
502 gen
->block_dim
[1] = 4;
503 gen
->block_dim
[2] = 4;
506 read_sizes_from_file(gen
, "block.sizes", gen
->block_dim
, gen
->n_block
);
507 reverse_list(gen
->block_dim
, gen
->n_block
);
509 gen
->n_grid
= (n
<= 2) ? n
: 2;
510 switch (gen
->n_grid
) {
512 gen
->grid_dim
[0] = 32768;
515 gen
->grid_dim
[0] = 256;
516 gen
->grid_dim
[1] = 256;
519 read_sizes_from_file(gen
, "grid.sizes", gen
->grid_dim
, gen
->n_grid
);
520 reverse_list(gen
->grid_dim
, gen
->n_grid
);
523 static void free_stmts(struct cuda_stmt
*stmts
, int n
)
527 for (i
= 0; i
< n
; ++i
) {
528 struct cuda_stmt_access
*access
, *next
;
530 for (access
= stmts
[i
].accesses
; access
; access
= next
) {
532 isl_map_free(access
->access
);
536 isl_set_free(stmts
[i
].domain
);
541 void clear_cuda_gen(struct cuda_gen
*gen
)
543 free_stmts(gen
->stmts
, gen
->n_stmts
);
544 free_array_info(gen
);
545 isl_set_free(gen
->context
);
546 isl_union_set_free(gen
->copy_in
);
547 isl_union_map_free(gen
->sched
);
548 isl_union_map_free(gen
->read
);
549 isl_union_map_free(gen
->write
);
/* Print "(list[len-1], ..., list[0])", i.e., the list in reverse,
 * as a parenthesized comma-separated list.  Nothing is printed for
 * an empty list.
 */
static void print_reverse_list(FILE *out, int len, int *list)
{
	int i;

	if (len == 0)
		return;

	fprintf(out, "(");
	for (i = 0; i < len; ++i) {
		if (i)
			fprintf(out, ", ");
		fprintf(out, "%d", list[len - 1 - i]);
	}
	fprintf(out, ")");
}
568 static void print_kernel_launch(struct cuda_gen
*gen
,
569 __isl_keep isl_union_set
*arrays
)
576 print_indent(gen
->code
.dst
, gen
->code
.indent
);
577 fprintf(gen
->code
.dst
, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
578 gen
->kernel_id
, gen
->kernel_id
, gen
->kernel_id
);
579 fprintf(gen
->cuda
.kernel_c
, "__global__ void kernel%d(",
581 fprintf(gen
->cuda
.kernel_h
, "__global__ void kernel%d(",
584 for (i
= 0; i
< gen
->n_array
; ++i
) {
589 dim
= isl_space_copy(gen
->array
[i
].dim
);
590 arr
= isl_union_set_extract_set(arrays
, dim
);
591 empty
= isl_set_fast_is_empty(arr
);
597 fprintf(gen
->code
.dst
, ", ");
598 fprintf(gen
->cuda
.kernel_c
, ", ");
599 fprintf(gen
->cuda
.kernel_h
, ", ");
602 fprintf(gen
->code
.dst
, "dev_%s", gen
->array
[i
].name
);
603 fprintf(gen
->cuda
.kernel_c
, "%s *%s",
604 gen
->array
[i
].type
, gen
->array
[i
].name
);
605 fprintf(gen
->cuda
.kernel_h
, "%s *%s",
606 gen
->array
[i
].type
, gen
->array
[i
].name
);
611 dim
= isl_union_set_get_space(arrays
);
612 nparam
= isl_space_dim(dim
, isl_dim_param
);
613 for (i
= 0; i
< nparam
; ++i
) {
614 const char *name
= isl_space_get_dim_name(dim
, isl_dim_param
, i
);
616 fprintf(gen
->code
.dst
, ", ");
617 fprintf(gen
->cuda
.kernel_c
, ", ");
618 fprintf(gen
->cuda
.kernel_h
, ", ");
620 fprintf(gen
->code
.dst
, "%s", name
);
621 fprintf(gen
->cuda
.kernel_c
, "int %s", name
);
622 fprintf(gen
->cuda
.kernel_h
, "int %s", name
);
627 for (i
= 0; i
< gen
->tile_first
; ++i
) {
629 fprintf(gen
->code
.dst
, ", ");
630 fprintf(gen
->cuda
.kernel_c
, ", ");
631 fprintf(gen
->cuda
.kernel_h
, ", ");
633 fprintf(gen
->code
.dst
, "h%d", i
);
634 fprintf(gen
->cuda
.kernel_c
, "int h%d", i
);
635 fprintf(gen
->cuda
.kernel_h
, "int h%d", i
);
639 fprintf(gen
->code
.dst
, ");\n");
640 fprintf(gen
->cuda
.kernel_c
, ")\n");
641 fprintf(gen
->cuda
.kernel_h
, ");\n");
643 fprintf(gen
->code
.dst
, "cudaCheckKernel();\n");
646 /* Construct a map from a domain of dimensionality "len"
647 * to a domain of dimensionality "len" + "tile_len" that tiles
648 * the "tile_len" coordinates starting at "first".
649 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
650 * "dim" prescribes the parameters.
652 static __isl_give isl_map
*tile(__isl_take isl_space
*dim
, int len
,
653 int first
, int tile_len
, int *tile_size
)
663 dim
= isl_space_add_dims(dim
, isl_dim_in
, len
);
664 dim
= isl_space_add_dims(dim
, isl_dim_out
, len
+ tile_len
);
665 bmap
= isl_basic_map_universe(isl_space_copy(dim
));
666 ls
= isl_local_space_from_space(dim
);
668 for (i
= 0; i
< len
- tile_len
; ++i
) {
669 int j
= i
< first
? i
: i
+ tile_len
;
670 int k
= i
< first
? i
: i
+ 2 * tile_len
;
672 c
= isl_equality_alloc(isl_local_space_copy(ls
));
673 isl_int_set_si(v
, -1);
674 isl_constraint_set_coefficient(c
, isl_dim_in
, j
, v
);
675 isl_int_set_si(v
, 1);
676 isl_constraint_set_coefficient(c
, isl_dim_out
, k
, v
);
677 bmap
= isl_basic_map_add_constraint(bmap
, c
);
680 for (i
= 0; i
< tile_len
; ++i
) {
681 c
= isl_equality_alloc(isl_local_space_copy(ls
));
682 isl_int_set_si(v
, -1);
683 isl_constraint_set_coefficient(c
, isl_dim_in
, first
+ i
, v
);
684 isl_int_set_si(v
, tile_size
[i
]);
685 isl_constraint_set_coefficient(c
, isl_dim_out
, first
+ i
, v
);
686 isl_int_set_si(v
, 1);
687 isl_constraint_set_coefficient(c
, isl_dim_out
,
688 first
+ i
+ tile_len
, v
);
689 bmap
= isl_basic_map_add_constraint(bmap
, c
);
691 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
692 isl_int_set_si(v
, 1);
693 isl_constraint_set_coefficient(c
, isl_dim_out
,
694 first
+ i
+ tile_len
, v
);
695 bmap
= isl_basic_map_add_constraint(bmap
, c
);
697 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
698 isl_int_set_si(v
, -1);
699 isl_constraint_set_coefficient(c
, isl_dim_out
,
700 first
+ i
+ tile_len
, v
);
701 isl_int_set_si(v
, tile_size
[i
] - 1);
702 isl_constraint_set_constant(c
, v
);
703 bmap
= isl_basic_map_add_constraint(bmap
, c
);
706 isl_local_space_free(ls
);
709 return isl_map_from_basic_map(bmap
);
712 /* Construct a map from a domain of dimensionality "len"
713 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
714 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
715 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
716 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
717 * that are projected out at the end.
718 * "dim" prescribes the parameters.
720 static __isl_give isl_map
*wrap(__isl_take isl_space
*dim
, int len
,
721 int first
, int wrap_len
, int *wrap_size
)
728 dim
= isl_space_add_dims(dim
, isl_dim_in
, len
);
729 dim
= isl_space_add_dims(dim
, isl_dim_out
, len
+ 2 * wrap_len
);
730 bmap
= isl_basic_map_universe(isl_space_copy(dim
));
731 ls
= isl_local_space_from_space(dim
);
733 for (i
= 0; i
< len
; ++i
) {
734 int k
= i
< first
+ wrap_len
? i
: i
+ 2 * wrap_len
;
736 c
= isl_equality_alloc(isl_local_space_copy(ls
));
737 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, -1);
738 isl_constraint_set_coefficient_si(c
, isl_dim_out
, k
, 1);
739 bmap
= isl_basic_map_add_constraint(bmap
, c
);
742 for (i
= 0; i
< wrap_len
; ++i
) {
743 c
= isl_equality_alloc(isl_local_space_copy(ls
));
744 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
746 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
747 first
+ wrap_len
+ i
, 1);
748 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
749 first
+ 2 * wrap_len
+ i
, wrap_size
[i
]);
750 bmap
= isl_basic_map_add_constraint(bmap
, c
);
752 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
753 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
754 first
+ wrap_len
+ i
, 1);
755 bmap
= isl_basic_map_add_constraint(bmap
, c
);
757 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
758 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
759 first
+ wrap_len
+ i
, -1);
760 isl_constraint_set_constant_si(c
, wrap_size
[i
] - 1);
761 bmap
= isl_basic_map_add_constraint(bmap
, c
);
764 isl_local_space_free(ls
);
766 bmap
= isl_basic_map_project_out(bmap
, isl_dim_out
,
767 first
+ 2 * wrap_len
, wrap_len
);
769 return isl_map_from_basic_map(bmap
);
772 /* Add "n" parameters named prefix%d.
774 static __isl_give isl_set
*add_params( __isl_take isl_set
*set
,
775 int n
, const char *prefix
)
781 nparam
= isl_set_dim(set
, isl_dim_param
);
782 set
= isl_set_add_dims(set
, isl_dim_param
, n
);
784 for (i
= 0; i
< n
; ++i
) {
785 snprintf(name
, sizeof(name
), "%s%d", prefix
, i
);
786 set
= isl_set_set_dim_name(set
, isl_dim_param
,
793 /* Equate the "n" dimensions of "set" starting at "first" to
794 * freshly created parameters named prefix%d.
796 static __isl_give isl_set
*parametrize(__isl_take isl_set
*set
,
797 int first
, int n
, const char *prefix
)
807 nparam
= isl_set_dim(set
, isl_dim_param
);
809 set
= add_params(set
, n
, prefix
);
811 dim
= isl_set_get_space(set
);
812 bset
= isl_basic_set_universe(isl_space_copy(dim
));
813 ls
= isl_local_space_from_space(dim
);
817 for (i
= 0; i
< n
; ++i
) {
818 c
= isl_equality_alloc(isl_local_space_copy(ls
));
819 isl_int_set_si(v
, -1);
820 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
821 isl_int_set_si(v
, 1);
822 isl_constraint_set_coefficient(c
, isl_dim_set
, first
+ i
, v
);
823 bset
= isl_basic_set_add_constraint(bset
, c
);
827 isl_local_space_free(ls
);
829 return isl_set_intersect(set
, isl_set_from_basic_set(bset
));
832 static __isl_give isl_set
*parametrization(__isl_take isl_space
*dim
,
833 int len
, int first
, int n
, const char *prefix
)
837 dim
= isl_space_add_dims(dim
, isl_dim_set
, len
);
838 set
= isl_set_universe(dim
);
840 return parametrize(set
, first
, n
, prefix
);
843 /* Tile the B loops over the tile sizes and then tile/wrap
844 * the T1 loops over the blocks.
846 static __isl_give isl_union_map
*tile_schedule(struct cuda_gen
*gen
,
847 __isl_take isl_union_map
*sched
)
850 isl_map
*tiling
, *block_tiling
;
852 dim
= isl_union_map_get_space(sched
);
853 tiling
= tile(isl_space_copy(dim
), gen
->untiled_len
,
854 gen
->tile_first
, gen
->tile_len
, gen
->tile_size
);
856 if (gen
->options
->wrap
)
857 block_tiling
= wrap(dim
, gen
->untiled_len
+ gen
->tile_len
,
858 gen
->tile_first
, gen
->n_grid
, gen
->grid_dim
);
860 block_tiling
= tile(dim
, gen
->untiled_len
+ gen
->tile_len
,
861 gen
->tile_first
, gen
->n_grid
, gen
->grid_dim
);
863 gen
->tiled_len
= gen
->untiled_len
+ gen
->tile_len
+ gen
->n_grid
;
865 tiling
= isl_map_apply_range(tiling
, block_tiling
);
867 sched
= isl_union_map_apply_range(sched
,
868 isl_union_map_from_map(tiling
));
870 gen
->shared_len
= gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
;
875 static __isl_give isl_union_map
*parametrize_tiled_schedule(
876 struct cuda_gen
*gen
, __isl_take isl_union_map
*sched
)
881 dim
= isl_union_map_get_space(sched
);
882 par
= parametrization(dim
, gen
->tiled_len
, 0, gen
->tile_first
, "h");
883 sched
= isl_union_map_intersect_range(sched
,
884 isl_union_set_from_set(par
));
886 dim
= isl_union_map_get_space(sched
);
887 par
= parametrization(dim
, gen
->tiled_len
,
888 gen
->tile_first
+ gen
->n_grid
, gen
->n_grid
, "b");
889 sched
= isl_union_map_intersect_range(sched
,
890 isl_union_set_from_set(par
));
895 /* Tile/wrap the P1 loops over the threads.
897 static __isl_give isl_union_map
*thread_tile_schedule(struct cuda_gen
*gen
,
898 __isl_take isl_union_map
*sched
)
904 dim
= isl_union_map_get_space(sched
);
906 if (gen
->options
->wrap
)
907 tiling
= wrap(isl_space_copy(dim
), gen
->tiled_len
,
908 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
910 tiling
= tile(isl_space_copy(dim
), gen
->tiled_len
,
911 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
912 gen
->thread_tiled_len
= gen
->tiled_len
+ gen
->n_block
;
914 sched
= isl_union_map_apply_range(sched
,
915 isl_union_map_from_map(tiling
));
917 par
= parametrization(dim
, gen
->thread_tiled_len
,
918 gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
+ gen
->n_block
,
920 sched
= isl_union_map_intersect_range(sched
,
921 isl_union_set_from_set(par
));
923 gen
->shared_len
= gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
;
928 /* If the user asked for it, scale the shared memory tile loops
929 * (T1P and T2) of "sched" by gen->tile_size[i].
930 * If we are not performing "wrapping", then additionally scale the T1P
931 * loops by gen->grid_dim[i].
933 static __isl_give isl_union_map
*scale_tile_loops(struct cuda_gen
*gen
,
934 __isl_take isl_union_map
*sched
)
938 isl_basic_map
*scale
;
942 if (!gen
->options
->scale_tile_loops
)
945 dim
= isl_union_map_get_space(sched
);
946 dim
= isl_space_add_dims(dim
, isl_dim_in
, gen
->tiled_len
);
947 dim
= isl_space_add_dims(dim
, isl_dim_out
, gen
->tiled_len
);
948 scale
= isl_basic_map_universe(isl_space_copy(dim
));
949 ls
= isl_local_space_from_space(dim
);
951 for (i
= 0; i
< gen
->tiled_len
; ++i
) {
954 if (i
>= gen
->tile_first
&& i
< gen
->tile_first
+ gen
->n_grid
) {
955 f
= gen
->tile_size
[i
- gen
->tile_first
];
956 if (!gen
->options
->wrap
)
957 f
*= gen
->grid_dim
[i
- gen
->tile_first
];
958 } else if (i
>= gen
->tile_first
+ gen
->n_grid
&&
959 i
< gen
->tile_first
+ gen
->n_grid
+ gen
->tile_len
) {
960 f
= gen
->tile_size
[i
- (gen
->tile_first
+ gen
->n_grid
)];
963 c
= isl_equality_alloc(isl_local_space_copy(ls
));
964 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
965 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
966 scale
= isl_basic_map_add_constraint(scale
, c
);
969 isl_local_space_free(ls
);
971 sched
= isl_union_map_apply_range(sched
,
972 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
977 /* If we are not performing "wrapping" and if the user asked for it,
978 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
980 static __isl_give isl_union_map
*scale_thread_tile_loops(struct cuda_gen
*gen
,
981 __isl_take isl_union_map
*sched
)
985 isl_basic_map
*scale
;
989 if (gen
->options
->wrap
)
991 if (!gen
->options
->scale_tile_loops
)
994 dim
= isl_union_map_get_space(sched
);
995 dim
= isl_space_add_dims(dim
, isl_dim_in
, gen
->thread_tiled_len
);
996 dim
= isl_space_add_dims(dim
, isl_dim_out
, gen
->thread_tiled_len
);
997 scale
= isl_basic_map_universe(isl_space_copy(dim
));
998 ls
= isl_local_space_from_space(dim
);
1000 for (i
= 0; i
< gen
->thread_tiled_len
; ++i
) {
1003 if (i
>= gen
->shared_len
&&
1004 i
< gen
->shared_len
+ gen
->n_block
)
1005 f
= gen
->block_dim
[i
- gen
->shared_len
];
1007 c
= isl_equality_alloc(isl_local_space_copy(ls
));
1008 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
1009 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
1010 scale
= isl_basic_map_add_constraint(scale
, c
);
1013 isl_local_space_free(ls
);
1015 sched
= isl_union_map_apply_range(sched
,
1016 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
1021 /* If we are not performing "wrapping" and if the user asked for it,
1022 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
1024 static __isl_give isl_union_map
*scale_access_tile_loops(struct cuda_gen
*gen
,
1025 __isl_take isl_union_map
*sched
, int len
, int first
, int n_tile
)
1029 isl_basic_map
*scale
;
1031 isl_local_space
*ls
;
1033 if (gen
->options
->wrap
)
1035 if (!gen
->options
->scale_tile_loops
)
1038 dim
= isl_union_map_get_space(sched
);
1039 dim
= isl_space_add_dims(dim
, isl_dim_in
, len
);
1040 dim
= isl_space_add_dims(dim
, isl_dim_out
, len
);
1041 scale
= isl_basic_map_universe(isl_space_copy(dim
));
1042 ls
= isl_local_space_from_space(dim
);
1044 for (i
= 0; i
< len
; ++i
) {
1047 if (i
>= first
&& i
< first
+ n_tile
)
1048 f
= gen
->block_dim
[i
- first
];
1050 c
= isl_equality_alloc(isl_local_space_copy(ls
));
1051 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
1052 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
1053 scale
= isl_basic_map_add_constraint(scale
, c
);
1056 isl_local_space_free(ls
);
1058 sched
= isl_union_map_apply_range(sched
,
1059 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
1064 /* If print_user_stmt is set, we want to print the statements ourselves,
1065 * instead of relying on the C preprocessor. If so, we need to use
1066 * the stop option so that the domains will be saved on the statement
1069 static void print_cloog_shared_body(struct cuda_gen
*gen
,
1070 __isl_keep isl_set
*context
, __isl_keep isl_union_map
*sched
, int len
,
1071 void (*print_user_stmt
)(struct gpucode_info
*info
,
1072 struct clast_user_stmt
*s
),
1076 CloogOptions
*options
;
1077 CloogDomain
*cloog_context
;
1078 CloogUnionDomain
*ud
;
1080 struct clast_stmt
*stmt
;
1083 sched
= isl_union_map_copy(sched
);
1084 sched
= isl_union_map_align_params(sched
, isl_set_get_space(context
));
1086 options
= cloog_options_malloc(gen
->state
);
1087 options
->language
= CLOOG_LANGUAGE_C
;
1088 options
->strides
= 1;
1092 options
->override
= 1;
1093 options
->save_domains
= 1;
1094 options
->noscalars
= 1;
1095 options
->first_unroll
= first_unroll
;
1097 ud
= cloog_union_domain_from_isl_union_map(sched
);
1098 for (i
= 0; i
< len
; ++i
) {
1099 snprintf(name
, sizeof(name
), "c%d", i
);
1100 ud
= cloog_union_domain_set_name(ud
, CLOOG_SCAT
, i
, name
);
1102 cloog_context
= cloog_domain_from_isl_set(isl_set_copy(context
));
1103 input
= cloog_input_alloc(cloog_context
, ud
);
1105 stmt
= cloog_clast_create_from_input(input
, options
);
1107 gen
->stmt_code
.indent
= gen
->kernel_code
.indent
;
1108 gen
->stmt_code
.dst
= gen
->cuda
.kernel_c
;
1109 gen
->stmt_code
.print_user_stmt
= print_user_stmt
;
1110 gen
->stmt_code
.print_user_stmt_list
= NULL
;
1111 gen
->stmt_code
.print_for_head
= NULL
;
1112 gen
->stmt_code
.print_for_foot
= NULL
;
1113 gen
->stmt_code
.user
= gen
;
1114 gpu_print_host_stmt(&gen
->stmt_code
, stmt
);
1116 cloog_clast_free(stmt
);
1117 cloog_options_free(options
);
1120 /* Add "len" parameters p[i] called prefix%d,
1121 * with bounds to 0 <= p[i] < size[i].
1123 __isl_give isl_set
*add_bounded_parameters(__isl_take isl_set
*set
,
1124 int len
, int *size
, const char *prefix
)
1130 isl_basic_set
*bset
;
1132 isl_local_space
*ls
;
1135 nparam
= isl_set_dim(set
, isl_dim_param
);
1136 set
= isl_set_add_dims(set
, isl_dim_param
, len
);
1138 for (i
= 0; i
< len
; ++i
) {
1139 snprintf(name
, sizeof(name
), "%s%d", prefix
, i
);
1140 set
= isl_set_set_dim_name(set
, isl_dim_param
,
1144 dim
= isl_set_get_space(set
);
1145 bset
= isl_basic_set_universe(isl_space_copy(dim
));
1146 ls
= isl_local_space_from_space(dim
);
1150 for (i
= 0; i
< len
; ++i
) {
1151 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
1152 isl_int_set_si(v
, 1);
1153 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
1154 bset
= isl_basic_set_add_constraint(bset
, c
);
1156 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
1157 isl_int_set_si(v
, -1);
1158 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
1159 isl_int_set_si(v
, size
[i
] - 1);
1160 isl_constraint_set_constant(c
, v
);
1161 bset
= isl_basic_set_add_constraint(bset
, c
);
1165 isl_local_space_free(ls
);
1167 return isl_set_intersect(set
, isl_set_from_basic_set(bset
));
1170 static void print_shared_body(struct cuda_gen
*gen
,
1171 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*sched
,
1172 int len
, void (*print_user_stmt
)(struct gpucode_info
*info
,
1173 struct clast_user_stmt
*s
),
1178 context
= isl_set_copy(shared_domain
);
1179 context
= parametrize(context
, 0, gen
->shared_len
, "g");
1180 context
= isl_set_project_out(context
, isl_dim_set
, 0, gen
->shared_len
);
1181 context
= add_bounded_parameters(context
,
1182 gen
->n_block
, gen
->block_dim
, "t");
1184 print_cloog_shared_body(gen
, context
, sched
, len
, print_user_stmt
,
1187 isl_set_free(context
);
1190 /* Given a tile of an array, construct a map that maps each element
1191 * of the tile to a copy of the tile shifted to the origin
1192 * (based on the lower bounds in group->private_bound or group->shared_bound).
1193 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
1194 * is applied to the index first.
1195 * The domain of the resulting map is "access",
1196 * while the range space is anonymous.
1198 static __isl_give isl_map
*shift_access(__isl_take isl_set
*access
,
1199 struct cuda_array_ref_group
*group
)
1203 isl_basic_set
*bset
;
1204 isl_basic_map
*bmap
;
1206 isl_basic_set
*offset
;
1207 isl_basic_map
*shift
;
1208 isl_basic_map
*pre_shift
;
1211 struct cuda_array_bound
*bounds
;
1212 int n_index
= group
->array
->n_index
;
1214 bounds
= group
->private_bound
;
1216 bounds
= group
->shared_bound
;
1218 dim
= isl_set_get_space(access
);
1219 dim
= isl_space_drop_dims(dim
, isl_dim_set
, 0, n_index
);
1220 offset
= isl_basic_set_universe(dim
);
1221 for (i
= 0; i
< n_index
; ++i
) {
1222 lb
= isl_aff_copy(bounds
[i
].lb
);
1223 bmap
= isl_basic_map_from_aff(lb
);
1224 bset
= isl_basic_map_range(bmap
);
1225 offset
= isl_basic_set_flat_product(offset
, bset
);
1227 offset
= isl_basic_set_neg(offset
);
1229 dim
= isl_space_map_from_set(isl_set_get_space(access
));
1230 shift
= isl_basic_map_identity(dim
);
1231 shift
= isl_basic_map_set_tuple_name(shift
, isl_dim_out
, NULL
);
1233 bset
= isl_basic_set_universe(isl_set_get_space(access
));
1234 bmap
= isl_basic_map_from_domain_and_range(bset
, offset
);
1236 shift
= isl_basic_map_sum(shift
, bmap
);
1238 dim
= isl_set_get_space(access
);
1239 dim
= isl_space_drop_dims(dim
, isl_dim_set
, 0, n_index
);
1240 dim
= isl_space_map_from_set(dim
);
1241 pre_shift
= isl_basic_map_universe(isl_space_copy(dim
));
1242 dim
= isl_space_add_dims(dim
, isl_dim_in
, 1);
1243 dim
= isl_space_add_dims(dim
, isl_dim_out
, 1);
1244 for (i
= 0; i
< n_index
; ++i
) {
1245 if (!bounds
[i
].shift_map
)
1246 bmap
= isl_basic_map_identity(isl_space_copy(dim
));
1248 bmap
= isl_basic_map_copy(bounds
[i
].shift_map
);
1249 pre_shift
= isl_basic_map_flat_product(pre_shift
, bmap
);
1251 isl_space_free(dim
);
1252 name
= isl_basic_map_get_tuple_name(shift
, isl_dim_in
);
1253 pre_shift
= isl_basic_map_set_tuple_name(pre_shift
, isl_dim_in
, name
);
1254 pre_shift
= isl_basic_map_set_tuple_name(pre_shift
, isl_dim_out
, name
);
1255 shift
= isl_basic_map_apply_range(pre_shift
, shift
);
1257 sched
= isl_map_from_basic_map(shift
);
1258 sched
= isl_map_intersect_domain(sched
, access
);
1263 /* Construct a schedule for iterating over all elements in the given
1264 * piece of an array. The schedule iterates over a copy of the piece
1265 * that is shifted to the origin.
1266 * We subsequently also perform the tiling/wrapping over the threads.
1268 * In particular, we tile the final iterators so that the final thread
1269 * dimension runs over the final array dimension.
1270 * However, if those final iterators have only a single iteration,
1271 * we try to tile earlier iterators instead.
1273 static __isl_give isl_union_map
*access_schedule(struct cuda_gen
*gen
,
1274 __isl_take isl_set
*access
, struct cuda_array_ref_group
*group
)
1278 isl_union_map
*usched
;
1281 unsigned nvar
= isl_set_dim(access
, isl_dim_set
);
1285 sched
= shift_access(access
, group
);
1287 n_tile
= gen
->n_block
;
1288 if (n_tile
> nvar
) {
1290 sched
= isl_map_insert_dims(sched
,
1291 isl_dim_out
, 0, n_tile
- nvar
);
1292 for (i
= 0; i
< n_tile
- nvar
; ++i
)
1293 sched
= isl_map_fix_si(sched
, isl_dim_out
, i
, 0);
1297 first
= nvar
- n_tile
;
1299 for (; first
> 0; first
--)
1300 if (!isl_map_plain_is_fixed(sched
, isl_dim_out
,
1301 first
+ n_tile
- 1, NULL
))
1304 dim
= isl_map_get_space(sched
);
1305 dim
= isl_space_params(dim
);
1306 if (gen
->options
->wrap
)
1307 tiling
= wrap(isl_space_copy(dim
), nvar
, first
,
1308 n_tile
, gen
->block_dim
);
1310 tiling
= tile(isl_space_copy(dim
), nvar
, first
,
1311 n_tile
, gen
->block_dim
);
1312 sched
= isl_map_apply_range(sched
, tiling
);
1314 par
= parametrization(dim
, nvar
+ n_tile
, first
+ n_tile
, n_tile
, "t");
1315 usched
= isl_union_map_from_map(sched
);
1316 usched
= isl_union_map_intersect_range(usched
,
1317 isl_union_set_from_set(par
));
1319 usched
= scale_access_tile_loops(gen
, usched
, nvar
+ n_tile
,
1325 /* Print an access to the element in the global memory copy of the
1326 * given array that corresponds to element [aff[0]][aff[1]]...
1327 * of the original array.
1328 * The copy in global memory has been linearized, so we need to take
1329 * the array size into account.
1331 static void print_global_index(isl_ctx
*ctx
, FILE *out
,
1332 struct cuda_array_info
*array
, __isl_keep isl_aff
**aff
)
1337 if (cuda_array_is_scalar(array
)) {
1338 fprintf(out
, "*%s", array
->name
);
1342 fprintf(out
, "%s[", array
->name
);
1343 prn
= isl_printer_to_file(ctx
, out
);
1344 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
1345 for (i
= 0; i
+ 1 < array
->n_index
; ++i
)
1346 prn
= isl_printer_print_str(prn
, "(");
1347 for (i
= 0; i
< array
->n_index
; ++i
) {
1349 prn
= isl_printer_print_str(prn
, ") * (");
1350 prn
= isl_printer_print_pw_aff(prn
,
1351 array
->local_bound
[i
]);
1352 prn
= isl_printer_print_str(prn
, ") + ");
1354 prn
= isl_printer_print_aff(prn
, aff
[i
]);
1356 isl_printer_free(prn
);
1360 /* Given an index expression into a tile of an array, adjust the expression
1361 * to a shift of the tile to the origin
1362 * (based on the lower bounds in array->shared_bound).
1363 * If the index is strided, then we first add
1364 * bound->shift and divide by bound->stride.
1366 static __isl_give isl_aff
*shift_index(__isl_take isl_aff
*aff
,
1367 struct cuda_array_info
*array
,
1368 struct cuda_array_bound
*bound
, __isl_take isl_set
*domain
)
1374 shift
= bound
->shift
;
1375 shift
= isl_aff_copy(shift
);
1376 shift
= isl_aff_project_domain_on_params(shift
);
1377 shift
= isl_aff_align_params(shift
, isl_aff_get_space(aff
));
1378 aff
= isl_aff_add(aff
, shift
);
1379 aff
= isl_aff_scale_down(aff
, bound
->stride
);
1382 lb
= isl_aff_copy(bound
->lb
);
1383 lb
= isl_aff_project_domain_on_params(lb
);
1385 lb
= isl_aff_align_params(lb
, isl_aff_get_space(aff
));
1387 aff
= isl_aff_sub(aff
, lb
);
1388 aff
= isl_aff_gist(aff
, domain
);
1393 /* Print an access to the element in the private/shared memory copy of the
1394 * given array reference group that corresponds to element [affs[0]][affs[1]]...
1395 * of the original array.
1396 * Since the array in private/shared memory is just a shifted copy of part
1397 * of the original array, we simply need to subtract the lower bound,
1398 * which was computed in can_tile_for_shared_memory.
1399 * If any of the indices is strided, then we first add
1400 * bounds[i].shift and divide by bounds[i].stride.
1402 static void print_local_index(isl_ctx
*ctx
, FILE *out
,
1403 struct cuda_array_ref_group
*group
, struct cuda_array_bound
*bounds
,
1404 __isl_keep isl_aff
**affs
, __isl_keep isl_set
*domain
)
1408 struct cuda_array_info
*array
= group
->array
;
1410 print_array_name(out
, group
);
1411 for (i
= 0; i
< array
->n_index
; ++i
) {
1412 isl_aff
*aff
= isl_aff_copy(affs
[i
]);
1414 aff
= shift_index(aff
, array
, &bounds
[i
], isl_set_copy(domain
));
1417 prn
= isl_printer_to_file(ctx
, out
);
1418 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
1419 prn
= isl_printer_print_aff(prn
, aff
);
1420 isl_printer_free(prn
);
1426 /* This function is called for each leaf in the clast of the code
1427 * for copying to or from shared/private memory.
1428 * The statement name is {read,write}_{shared,private}_<array>.
1430 * The schedule iterates over the array elements, so we can use
1431 * the domain of copy_sched at the current scheduling position
1432 * as the index of the array.
1434 static void print_copy_statement(struct gpucode_info
*code
,
1435 struct clast_user_stmt
*u
)
1437 struct cuda_gen
*gen
= code
->user
;
1440 struct cuda_array_ref_group
*group
= gen
->copy_group
;
1441 struct cuda_array_bound
*bounds
= gen
->copy_bound
;
1453 read
= !strncmp(u
->statement
->name
, "read", 4);
1455 domain
= extract_host_domain(u
);
1458 sched
= isl_map_copy(gen
->copy_sched
);
1459 sched
= isl_map_reverse(sched
);
1460 sched
= isl_map_intersect_domain(sched
, domain
);
1461 n_in
= isl_map_dim(sched
, isl_dim_in
);
1462 n_out
= isl_map_dim(sched
, isl_dim_out
);
1463 dim
= isl_map_get_space(sched
);
1464 dim
= isl_space_drop_dims(dim
, isl_dim_in
, 0, n_in
);
1465 dim
= isl_space_drop_dims(dim
, isl_dim_out
, 0, n_out
);
1466 param
= parametrization(dim
, n_in
, 0, n_in
, "c");
1467 sched
= isl_map_align_params(sched
, isl_set_get_space(param
));
1468 sched
= isl_map_intersect_domain(sched
, param
);
1469 index
= isl_map_range(sched
);
1470 domain
= isl_set_copy(index
);
1471 aff
= isl_set_affine_hull(index
);
1472 domain
= isl_set_params(domain
);
1474 ctx
= isl_basic_set_get_ctx(aff
);
1475 affs
= isl_alloc_array(ctx
, isl_aff
*, n_out
);
1478 for (i
= 0; i
< n_out
; ++i
) {
1482 ok
= isl_basic_set_has_defining_equality(aff
,
1483 isl_dim_set
, i
, &c
);
1485 affs
[i
] = isl_constraint_get_bound(c
, isl_dim_set
, i
);
1486 isl_constraint_free(c
);
1487 affs
[i
] = isl_aff_project_domain_on_params(affs
[i
]);
1490 print_indent(code
->dst
, code
->indent
);
1492 print_local_index(ctx
, code
->dst
, group
, bounds
, affs
, domain
);
1493 fprintf(code
->dst
, " = ");
1494 print_global_index(ctx
, code
->dst
, group
->array
, affs
);
1496 print_global_index(ctx
, code
->dst
, group
->array
, affs
);
1497 fprintf(code
->dst
, " = ");
1498 print_local_index(ctx
, code
->dst
, group
, bounds
, affs
, domain
);
1500 fprintf(code
->dst
, ";\n");
1502 for (i
= 0; i
< n_out
; ++i
)
1503 isl_aff_free(affs
[i
]);
1506 isl_basic_set_free(aff
);
1507 isl_set_free(domain
);
1510 static void print_shared_access(struct cuda_gen
*gen
,
1511 __isl_keep isl_set
*shared_domain
, __isl_take isl_set
*access
,
1512 const char *type
, struct cuda_array_ref_group
*group
)
1514 const char *array_name
;
1517 isl_union_map
*sched
;
1518 unsigned nvar
= isl_set_dim(access
, isl_dim_set
);
1521 ctx
= isl_set_get_ctx(access
);
1522 array_name
= isl_set_get_tuple_name(access
);
1523 name
= isl_alloc_array(ctx
, char,
1524 strlen(type
) + sizeof("_shared_") + strlen(array_name
) + 20);
1525 if (group
->array
->n_group
> 1)
1526 sprintf(name
, "%s_shared_%s_%d", type
, array_name
, group
->nr
);
1528 sprintf(name
, "%s_shared_%s", type
, array_name
);
1529 access
= isl_set_set_tuple_name(access
, name
);
1532 sched
= access_schedule(gen
, access
, group
);
1534 n_tile
= gen
->n_block
;
1538 gen
->copy_sched
= isl_map_from_union_map(isl_union_map_copy(sched
));
1539 gen
->copy_group
= group
;
1540 gen
->copy_bound
= group
->shared_bound
;
1542 print_shared_body(gen
, shared_domain
, sched
, nvar
+ n_tile
,
1543 &print_copy_statement
, -1);
1545 isl_union_map_free(sched
);
1546 isl_map_free(gen
->copy_sched
);
1549 /* Return the union of all read (read = 1) and/or write (write = 1)
1550 * access relations in the group.
1552 static __isl_give isl_union_map
*group_access_relation(
1553 struct cuda_array_ref_group
*group
, int read
, int write
)
1556 isl_union_map
*access
;
1558 access
= isl_union_map_empty(isl_map_get_space(group
->access
));
1559 for (i
= 0; i
< group
->n_ref
; ++i
) {
1562 if (!((read
&& group
->refs
[i
]->read
) ||
1563 (write
&& group
->refs
[i
]->write
)))
1565 map_i
= isl_map_copy(group
->refs
[i
]->access
);
1566 access
= isl_union_map_union(access
,
1567 isl_union_map_from_map(map_i
));
1573 /* Check that none of the shared memory tiles involve any strides.
1575 static int no_strides(struct cuda_array_ref_group
*group
)
1578 int n_index
= group
->array
->n_index
;
1580 for (i
= 0; i
< n_index
; ++i
)
1581 if (group
->shared_bound
[i
].shift
)
1587 /* Return a set containing the values of the given index i
1588 * of the elements in the array tile in global memory that corresponds
1589 * to the shared memory copy.
1590 * In particular, if a is the index, we return a set with constraints
1592 * tile_offset <= a <= tile_offset + tile_size - 1
1596 * 0 <= a <= array_size - 1
1599 static __isl_give isl_set
*group_tile_dim(struct cuda_array_ref_group
*group
,
1602 isl_basic_set
*tile
;
1605 isl_local_space
*ls
;
1610 aff
= isl_aff_copy(group
->shared_bound
[i
].lb
);
1611 aff
= isl_aff_add_dims(aff
, isl_dim_in
, 1);
1612 ls
= isl_aff_get_domain_local_space(aff
);
1613 aff
= isl_aff_neg(aff
);
1614 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, 0, 1);
1615 c
= isl_inequality_from_aff(isl_aff_copy(aff
));
1616 tile
= isl_basic_set_from_constraint(c
);
1618 aff
= isl_aff_neg(aff
);
1619 aff
= isl_aff_add_constant(aff
, group
->shared_bound
[i
].size
);
1620 aff
= isl_aff_add_constant_si(aff
, -1);
1621 c
= isl_inequality_from_aff(aff
);
1622 tile
= isl_basic_set_add_constraint(tile
, c
);
1624 aff
= isl_aff_zero_on_domain(ls
);
1625 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, 0, 1);
1626 c
= isl_inequality_from_aff(aff
);
1627 tile
= isl_basic_set_add_constraint(tile
, c
);
1629 bound
= isl_pw_aff_copy(group
->array
->bound
[i
]);
1630 bound
= isl_pw_aff_add_dims(bound
, isl_dim_in
, 1);
1631 ls
= isl_local_space_from_space(isl_pw_aff_get_domain_space(bound
));
1632 aff
= isl_aff_zero_on_domain(ls
);
1633 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, 0, 1);
1634 aff
= isl_aff_add_constant_si(aff
, 1);
1635 dom
= isl_pw_aff_domain(isl_pw_aff_copy(bound
));
1637 tile_set
= isl_pw_aff_ge_set(bound
, isl_pw_aff_alloc(dom
, aff
));
1638 tile_set
= isl_set_align_params(tile_set
, isl_basic_set_get_space(tile
));
1639 tile_set
= isl_set_intersect(tile_set
, isl_set_from_basic_set(tile
));
1644 /* Return a set containing the elements in the array tile in
1645 * global memory that corresponds to the shared memory copy.
1647 static __isl_give isl_set
*group_tile(struct cuda_array_ref_group
*group
)
1650 int n_index
= group
->array
->n_index
;
1653 tile
= group_tile_dim(group
, 0);
1654 for (i
= 1; i
< n_index
; ++i
) {
1657 tile_i
= group_tile_dim(group
, i
);
1658 tile
= isl_set_flat_product(tile
, tile_i
);
1661 tile
= isl_set_set_tuple_name(tile
, group
->array
->name
);
1666 /* Print code for reading into or writing from shared memory
1667 * the given array reference group.
1669 * sched maps the original iteration domains to the shared memory tile loops.
1671 * If we are performing a read from global memory to shared memory,
1672 * if the array involved is not a scalar and if the definition of the
1673 * shared memory tiles does not involve any strides, then we copy
1674 * the entire tile to shared memory. This may result in some extra
1675 * elements getting copied, but it should lead to simpler code
1676 * (which means that fewer registers may be needed) and less divergence.
1678 * Otherwise, we only copy the elements that will be read or have been written
1681 * Note that the absence of stride requirement can easily be lifted.
1682 * We would just need to add constraints of the form
1684 * shift + a = stride * alpha
1686 static int print_group_shared_accesses(struct cuda_gen
*gen
,
1687 struct cuda_array_ref_group
*group
, const char *type
,
1688 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*sched
)
1691 isl_union_map
*access
;
1692 isl_union_set
*uset
;
1693 isl_set
*access_set
;
1695 if (group
->private_bound
)
1697 if (!group
->shared_bound
)
1700 read
= !strcmp(type
, "read");
1702 access
= group_access_relation(group
, read
, !read
);
1703 access
= isl_union_map_apply_domain(access
, isl_union_map_copy(sched
));
1704 uset
= isl_union_map_range(access
);
1706 if (isl_union_set_is_empty(uset
)) {
1707 isl_union_set_free(uset
);
1711 if (read
&& group
->array
->n_index
> 0 && no_strides(group
)) {
1712 isl_union_set_free(uset
);
1713 access_set
= group_tile(group
);
1714 print_shared_access(gen
, shared_domain
, access_set
,
1719 access_set
= isl_set_from_union_set(uset
);
1720 access_set
= isl_set_coalesce(access_set
);
1722 print_shared_access(gen
, shared_domain
, access_set
, type
, group
);
1727 /* Print code for reading into or writing from shared memory at
1728 * the given level (-1 for innermost).
1730 * If we are not printing at the innermost level, then the dimensionality
1731 * of shared_domain may be smaller than gen->shared_len.
1732 * As the rest of the code assumes that the domain of access has
1733 * gen->shared_len dimensions, we therefore may need to embed this domain
1734 * in a higher dimensional space after intersection with shared_domain.
1736 static void print_shared_accesses(struct cuda_gen
*gen
,
1737 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*access
,
1738 const char *type
, int level
)
1744 int shared_len
= isl_set_dim(shared_domain
, isl_dim_set
);
1746 isl_union_map
*sched
;
1748 shared_domain
= isl_set_copy(shared_domain
);
1749 sched
= isl_union_map_copy(gen
->tiled_sched
);
1750 dim
= isl_union_map_get_space(sched
);
1751 proj
= projection(dim
, gen
->tiled_len
, shared_len
);
1752 sched
= isl_union_map_apply_range(sched
, isl_union_map_from_map(proj
));
1753 sched
= isl_union_map_intersect_range(sched
,
1754 isl_union_set_from_set(isl_set_copy(shared_domain
)));
1755 if (shared_len
!= gen
->shared_len
) {
1756 dim
= isl_union_map_get_space(sched
);
1757 proj
= projection(dim
, gen
->shared_len
, shared_len
);
1758 proj
= isl_map_reverse(proj
);
1759 shared_domain
= isl_set_apply(shared_domain
,
1760 isl_map_copy(proj
));
1761 sched
= isl_union_map_apply_range(sched
,
1762 isl_union_map_from_map(proj
));
1765 dim
= isl_union_map_get_space(sched
);
1766 par
= parametrization(dim
, gen
->shared_len
, 0, gen
->shared_len
, "g");
1767 sched
= isl_union_map_intersect_range(sched
,
1768 isl_union_set_from_set(par
));
1770 for (i
= 0; i
< gen
->n_array
; ++i
) {
1771 struct cuda_array_info
*array
= &gen
->array
[i
];
1773 if (gen
->array
[i
].print_shared_level
!= level
)
1776 for (j
= 0; j
< array
->n_group
; ++j
) {
1777 if (print_group_shared_accesses(gen
, array
->groups
[j
],
1778 type
, shared_domain
, sched
))
1783 isl_union_map_free(sched
);
1784 isl_set_free(shared_domain
);
1787 print_indent(gen
->cuda
.kernel_c
, gen
->kernel_code
.indent
);
1788 fprintf(gen
->cuda
.kernel_c
, "__syncthreads();\n");
1792 /* This function is called for each access to an array in some statement
1793 * in the original code.
1794 * Replace that access by an access to shared or (linearized) global memory.
1795 * Since the array in shared memory is just
1796 * a shifted copy of part of the original array, we simply need
1797 * to subtract the lower bound, which was computed
1798 * in can_tile_for_shared_memory.
1799 * If any of the indices is strided, then we first add
1800 * shared_bound[i].shift and divide by shared_bound[i].stride.
1802 * If the given array is accessed directly from global memory,
1803 * we don't need to perform any shifting and simply simplify
1804 * expression in the context of the domain instead.
1806 * If the array space (range of access) has no name, then we are
1807 * accessing an iterator in the original program.
1809 static void print_access(struct cuda_gen
*gen
, __isl_take isl_map
*access
,
1815 struct cuda_array_info
*array
= NULL
;
1820 struct cuda_array_bound
*bounds
= NULL
;
1822 access
= isl_map_align_params(access
,
1823 isl_set_get_space(gen
->stmt_domain
));
1825 data_set
= isl_set_apply(isl_set_copy(gen
->stmt_domain
), access
);
1827 name
= isl_set_get_tuple_name(data_set
);
1830 fprintf(gen
->cuda
.kernel_c
, "(");
1832 struct cuda_array_ref_group
*group
;
1834 for (i
= 0; i
< gen
->n_array
; ++i
) {
1835 if (strcmp(name
, gen
->array
[i
].name
))
1837 array
= &gen
->array
[i
];
1840 group
= array
->groups
[group_nr
];
1841 bounds
= group
->private_bound
;
1843 bounds
= group
->shared_bound
;
1845 if (!bounds
&& cuda_array_is_scalar(array
))
1846 fprintf(gen
->cuda
.kernel_c
, "*");
1847 print_array_name(gen
->cuda
.kernel_c
, group
);
1849 if (cuda_array_is_scalar(array
)) {
1850 isl_set_free(data_set
);
1854 fprintf(gen
->cuda
.kernel_c
, "[");
1858 n_index
= isl_set_dim(data_set
, isl_dim_set
);
1859 aff
= isl_set_affine_hull(data_set
);
1861 prn
= isl_printer_to_file(gen
->ctx
, gen
->cuda
.kernel_c
);
1862 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
1865 for (i
= 0; i
+ 1 < n_index
; ++i
)
1866 prn
= isl_printer_print_str(prn
, "(");
1868 for (i
= 0; i
< n_index
; ++i
) {
1873 ok
= isl_basic_set_has_defining_equality(aff
,
1874 isl_dim_out
, i
, &c
);
1876 index
= isl_constraint_get_bound(c
, isl_dim_out
, i
);
1877 isl_constraint_free(c
);
1878 index
= isl_aff_project_domain_on_params(index
);
1881 prn
= isl_printer_print_aff(prn
, index
);
1882 isl_aff_free(index
);
1886 domain
= isl_set_copy(gen
->stmt_domain
);
1887 domain
= isl_set_params(domain
);
1889 index
= isl_aff_gist(index
, domain
);
1891 index
= shift_index(index
, array
, &bounds
[i
], domain
);
1895 prn
= isl_printer_print_str(prn
, ") * (");
1896 prn
= isl_printer_print_pw_aff(prn
,
1897 array
->local_bound
[i
]);
1898 prn
= isl_printer_print_str(prn
, ") + ");
1900 prn
= isl_printer_print_str(prn
, "][");
1902 prn
= isl_printer_print_aff(prn
, index
);
1903 isl_aff_free(index
);
1906 prn
= isl_printer_print_str(prn
, ")");
1908 prn
= isl_printer_print_str(prn
, "]");
1909 isl_printer_free(prn
);
1911 isl_basic_set_free(aff
);
1914 static struct cuda_stmt_access
*print_expr(struct cuda_gen
*gen
, FILE *out
,
1915 struct pet_expr
*expr
, struct cuda_stmt_access
*access
, int outer
)
1919 switch (expr
->type
) {
1920 case pet_expr_double
:
1921 fprintf(out
, "%g", expr
->d
);
1923 case pet_expr_access
:
1924 print_access(gen
, isl_map_copy(access
->access
), access
->group
);
1925 access
= access
->next
;
1927 case pet_expr_unary
:
1930 fprintf(out
, " %s ", pet_op_str(expr
->op
));
1931 access
= print_expr(gen
, out
, expr
->args
[pet_un_arg
],
1936 case pet_expr_binary
:
1939 access
= print_expr(gen
, out
, expr
->args
[pet_bin_lhs
],
1941 fprintf(out
, " %s ", pet_op_str(expr
->op
));
1942 access
= print_expr(gen
, out
, expr
->args
[pet_bin_rhs
],
1947 case pet_expr_ternary
:
1950 access
= print_expr(gen
, out
, expr
->args
[pet_ter_cond
],
1952 fprintf(out
, " ? ");
1953 access
= print_expr(gen
, out
, expr
->args
[pet_ter_true
],
1955 fprintf(out
, " : ");
1956 access
= print_expr(gen
, out
, expr
->args
[pet_ter_false
],
1962 fprintf(out
, "%s(", expr
->name
);
1963 for (i
= 0; i
< expr
->n_arg
; ++i
) {
1966 access
= print_expr(gen
, out
, expr
->args
[i
],
1974 static void print_stmt_body(struct cuda_gen
*gen
,
1975 FILE *out
, struct cuda_stmt
*stmt
)
1977 print_expr(gen
, out
, stmt
->body
, stmt
->accesses
, 1);
1978 fprintf(out
, ";\n");
1981 /* This function is called for each leaf in the innermost clast,
1982 * i.e., for each statement.
1983 * We print the statement body, simplifying the accesses based
1986 static void print_statement(struct gpucode_info
*code
,
1987 struct clast_user_stmt
*u
)
1989 struct cuda_gen
*gen
= code
->user
;
1992 isl_set
*stmt_domain
;
1993 isl_union_map
*stmt_sched
;
1994 isl_union_set
*uset
;
1996 struct cuda_stmt
*stmt
;
1998 nr
= atoi(u
->statement
->name
+ 2);
1999 stmt
= &gen
->stmts
[nr
];
2001 stmt_domain
= extract_host_domain(u
);
2003 stmt_sched
= isl_union_map_intersect_range(
2004 isl_union_map_copy(gen
->local_sched
),
2005 isl_union_set_from_set(extend(stmt_domain
,
2006 gen
->thread_tiled_len
)));
2007 dim
= isl_union_map_get_space(stmt_sched
);
2008 par
= parametrization(dim
, gen
->thread_tiled_len
, 0,
2009 gen
->thread_tiled_len
, "c");
2010 stmt_sched
= isl_union_map_intersect_range(stmt_sched
,
2011 isl_union_set_from_set(par
));
2013 uset
= isl_union_map_domain(stmt_sched
);
2014 dim
= isl_union_set_get_space(uset
);
2015 dim
= isl_space_add_dims(dim
, isl_dim_set
,
2016 isl_set_dim(stmt
->domain
, isl_dim_set
));
2017 dim
= isl_space_set_tuple_name(dim
, isl_dim_set
, u
->statement
->name
);
2018 gen
->stmt_domain
= isl_union_set_extract_set(uset
, dim
);
2019 isl_union_set_free(uset
);
2021 print_indent(code
->dst
, code
->indent
);
2022 print_stmt_body(gen
, code
->dst
, stmt
);
2024 isl_set_free(gen
->stmt_domain
);
2027 static void print_private_access(struct cuda_gen
*gen
,
2028 __isl_keep isl_set
*shared_domain
, __isl_take isl_set
*access
,
2029 const char *type
, struct cuda_array_ref_group
*group
)
2031 const char *array_name
;
2034 unsigned nvar
= isl_set_dim(access
, isl_dim_set
);
2035 isl_union_map
*usched
;
2037 if (isl_set_fast_is_empty(access
)) {
2038 isl_set_free(access
);
2042 ctx
= isl_set_get_ctx(access
);
2043 array_name
= isl_set_get_tuple_name(access
);
2044 name
= isl_alloc_array(ctx
, char,
2045 strlen(type
) + sizeof("_private_") + strlen(array_name
) + 20);
2046 if (group
->array
->n_group
> 1)
2047 sprintf(name
, "%s_private_%s_%d", type
, array_name
, group
->nr
);
2049 sprintf(name
, "%s_private_%s", type
, array_name
);
2050 access
= isl_set_set_tuple_name(access
, name
);
2053 gen
->copy_sched
= shift_access(access
, group
);
2054 gen
->copy_group
= group
;
2055 gen
->copy_bound
= group
->private_bound
;
2057 usched
= isl_union_map_from_map(isl_map_copy(gen
->copy_sched
));
2058 print_shared_body(gen
, shared_domain
, usched
, nvar
,
2059 &print_copy_statement
, 1);
2060 isl_union_map_free(usched
);
2062 isl_map_free(gen
->copy_sched
);
2065 /* Print code for reading into or writing from private memory
2066 * the given array reference group.
2068 * sched maps the original iteration domains to the shared memory tile loops.
2070 static void print_group_private_accesses(struct cuda_gen
*gen
,
2071 struct cuda_array_ref_group
*group
,
2072 const char *type
, __isl_keep isl_set
*shared_domain
,
2073 unsigned first_shared
, int shared_len
, __isl_keep isl_union_map
*sched
)
2076 isl_union_map
*access
;
2077 isl_union_set
*uset
;
2078 isl_set
*access_set
;
2080 if (!group
->private_bound
)
2083 read
= !strcmp(type
, "read");
2085 access
= group_access_relation(group
, read
, !read
);
2086 access
= isl_union_map_apply_domain(access
, isl_union_map_copy(sched
));
2087 access
= isl_union_map_intersect(access
,
2088 isl_union_map_copy(gen
->private_access
));
2089 uset
= isl_union_map_range(access
);
2091 if (isl_union_set_is_empty(uset
)) {
2092 isl_union_set_free(uset
);
2096 access_set
= isl_set_from_union_set(uset
);
2097 access_set
= isl_set_coalesce(access_set
);
2098 access_set
= isl_set_eliminate(access_set
, isl_dim_param
,
2099 first_shared
+ shared_len
,
2100 gen
->shared_len
- shared_len
);
2102 print_private_access(gen
, shared_domain
, access_set
, type
, group
);
2105 /* Print code for reading into or writing from private memory at
2106 * the given level (-1 for innermost).
2108 * If we are not printing at the innermost level, then the dimensionality
2109 * of shared_domain may be smaller than gen->shared_len.
2110 * As the rest of the code assumes that the domain of access has
2111 * gen->shared_len dimensions, we therefore may need to embed this domain
2112 * in a higher dimensional space after intersection with shared_domain.
2114 * This code is very similar to print_shared_accesses.
2115 * The main difference is that we to take into account gen->private_access.
2117 static void print_private_accesses(struct cuda_gen
*gen
,
2118 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*access
,
2119 const char *type
, int level
)
2124 int shared_len
= isl_set_dim(shared_domain
, isl_dim_set
);
2125 unsigned first_shared
;
2126 isl_union_map
*sched
;
2128 shared_domain
= isl_set_copy(shared_domain
);
2129 sched
= isl_union_map_copy(gen
->tiled_sched
);
2130 dim
= isl_union_map_get_space(sched
);
2131 first_shared
= isl_space_dim(dim
, isl_dim_param
);
2132 proj
= projection(dim
, gen
->tiled_len
, shared_len
);
2133 sched
= isl_union_map_apply_range(sched
, isl_union_map_from_map(proj
));
2134 sched
= isl_union_map_intersect_range(sched
,
2135 isl_union_set_from_set(isl_set_copy(shared_domain
)));
2136 if (shared_len
!= gen
->shared_len
) {
2137 dim
= isl_union_map_get_space(sched
);
2138 proj
= projection(dim
, gen
->shared_len
, shared_len
);
2139 proj
= isl_map_reverse(proj
);
2140 shared_domain
= isl_set_apply(shared_domain
,
2141 isl_map_copy(proj
));
2142 sched
= isl_union_map_apply_range(sched
,
2143 isl_union_map_from_map(proj
));
2146 for (i
= 0; i
< gen
->n_array
; ++i
) {
2147 struct cuda_array_info
*array
= &gen
->array
[i
];
2149 if (gen
->array
[i
].print_shared_level
!= level
)
2152 for (j
= 0; j
< array
->n_group
; ++j
)
2153 print_group_private_accesses(gen
, array
->groups
[j
],
2154 type
, shared_domain
,
2155 first_shared
, shared_len
, sched
);
2158 isl_union_map_free(sched
);
2159 isl_set_free(shared_domain
);
2162 /* Set unroll[j] if the input dimension j is involved in
2163 * the index expression represented by bmap.
2165 static int check_unroll(__isl_take isl_basic_map
*bmap
, void *user
)
2168 int n_in
= isl_basic_map_dim(bmap
, isl_dim_in
);
2169 int n_out
= isl_basic_map_dim(bmap
, isl_dim_out
);
2172 for (i
= 0; i
< n_out
; ++i
) {
2176 ok
= isl_basic_map_has_defining_equality(bmap
,
2177 isl_dim_out
, i
, &c
);
2179 for (j
= 0; j
< n_in
; ++j
)
2180 if (isl_constraint_involves_dims(c
, isl_dim_in
, j
, 1))
2182 isl_constraint_free(c
);
2185 isl_basic_map_free(bmap
);
2189 /* Given an array pos mapping input dimensions to the corresponding
2190 * output dimension, construct the corresponding map.
2192 static __isl_give isl_map
*permutation(__isl_take isl_space
*dim
,
2197 isl_basic_map
*bmap
;
2198 isl_local_space
*ls
;
2200 dim
= isl_space_add_dims(dim
, isl_dim_in
, len
);
2201 dim
= isl_space_add_dims(dim
, isl_dim_out
, len
);
2202 bmap
= isl_basic_map_universe(isl_space_copy(dim
));
2203 ls
= isl_local_space_from_space(dim
);
2205 for (i
= 0; i
< len
; ++i
) {
2206 c
= isl_equality_alloc(isl_local_space_copy(ls
));
2207 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, -1);
2208 isl_constraint_set_coefficient_si(c
, isl_dim_out
, pos
[i
], 1);
2209 bmap
= isl_basic_map_add_constraint(bmap
, c
);
2211 isl_local_space_free(ls
);
2213 return isl_map_from_basic_map(bmap
);
2216 /* Find all loops involved in any of the index expressions for any of
2217 * the private accesses, move them innermost and then mark them as
2218 * requiring unrolling by setting gen->first_unroll.
2219 * The loops involved should all be parallel because of the checks
2220 * we performed in check_private_group_access. Moving them innermost
2221 * is therefore a valid transformation.
2223 static __isl_give isl_union_map
*interchange_for_unroll(struct cuda_gen
*gen
,
2224 __isl_take isl_union_map
*sched
)
2227 int unroll
[gen
->thread_tiled_len
];
2228 int perm
[gen
->thread_tiled_len
];
2231 int len
= gen
->shared_len
+ gen
->n_parallel
+ gen
->n_block
;
2233 gen
->first_unroll
= -1;
2235 for (i
= 0; i
< gen
->thread_tiled_len
; ++i
)
2237 for (i
= 0; i
< gen
->n_array
; ++i
) {
2238 struct cuda_array_info
*array
= &gen
->array
[i
];
2240 for (j
= 0; j
< array
->n_group
; ++j
) {
2241 isl_union_map
*access
;
2244 if (!array
->groups
[j
]->private_bound
)
2247 access
= group_access_relation(array
->groups
[j
], 1, 1);
2248 access
= isl_union_map_apply_domain(access
,
2249 isl_union_map_copy(sched
));
2251 acc
= isl_map_from_union_map(access
);
2252 isl_map_foreach_basic_map(acc
, &check_unroll
, unroll
);
2258 for (i
= 0; i
< gen
->shared_len
; ++i
)
2262 for (i
= gen
->shared_len
; i
< len
; ++i
)
2269 for (i
= len
; i
< gen
->thread_tiled_len
; ++i
)
2274 for (i
= 0; i
< gen
->thread_tiled_len
; ++i
)
2277 gen
->first_unroll
= 1 + j
;
2278 for (i
= 0; i
< len
; ++i
)
2282 dim
= isl_union_map_get_space(sched
);
2283 permute
= permutation(dim
, perm
, gen
->thread_tiled_len
);
2284 sched
= isl_union_map_apply_range(sched
,
2285 isl_union_map_from_map(permute
));
2290 /* This function is called for each leaf in the clast of the kernel code.
2291 * We first specialize the schedule to the site of the leaf and
2292 * print code for reading into shared memory, performing the actual
2293 * computations and writing from shared memory, with the required
2296 static void print_kernel_user(struct gpucode_info
*code
,
2297 struct clast_user_stmt
*u
)
2299 struct cuda_gen
*gen
= code
->user
;
2300 isl_set
*shared_domain
;
2302 shared_domain
= extract_entire_host_domain(u
);
2304 print_shared_accesses(gen
, shared_domain
, gen
->read
, "read", -1);
2306 print_private_accesses(gen
, shared_domain
, gen
->read
, "read", -1);
2308 print_shared_body(gen
, shared_domain
, gen
->local_sched
,
2309 gen
->thread_tiled_len
, &print_statement
,
2312 print_private_accesses(gen
, shared_domain
, gen
->write
, "write", -1);
2314 print_indent(gen
->cuda
.kernel_c
, gen
->kernel_code
.indent
);
2315 fprintf(gen
->cuda
.kernel_c
, "__syncthreads();\n");
2317 print_shared_accesses(gen
, shared_domain
, gen
->write
, "write", -1);
2319 isl_set_free(shared_domain
);
2322 /* Check if we need to perform any copying to shared memory at this level
2323 * and if so, print the copying instructions.
2324 * Any array for which we are allowed to print copying instructions at
2325 * this level, but haven't done so already, is printed.
2327 static void print_kernel_for_head(struct gpucode_info
*code
,
2328 struct clast_for
*f
)
2331 struct cuda_gen
*gen
= code
->user
;
2336 domain
= isl_set_from_cloog_domain(cloog_domain_copy(f
->domain
));
2337 level
= isl_set_dim(domain
, isl_dim_set
) - 1;
2339 for (i
= 0; i
< gen
->n_array
; ++i
) {
2340 if (gen
->array
[i
].print_shared_level
>= 0)
2342 if (gen
->array
[i
].last_shared
> level
)
2344 gen
->array
[i
].print_shared_level
= level
;
2349 print_shared_accesses(gen
, domain
, gen
->read
, "read", level
);
2350 print_private_accesses(gen
, domain
, gen
->read
, "read", level
);
2353 isl_set_free(domain
);
2356 /* Print instructions for copying from shared memory for each array
2357 * for which print_kernel_for_head has added copying instructions
2360 static void print_kernel_for_foot(struct gpucode_info
*code
,
2361 struct clast_for
*f
)
2364 struct cuda_gen
*gen
= code
->user
;
2369 domain
= isl_set_from_cloog_domain(cloog_domain_copy(f
->domain
));
2370 level
= isl_set_dim(domain
, isl_dim_set
) - 1;
2372 for (i
= 0; i
< gen
->n_array
; ++i
) {
2373 if (gen
->array
[i
].print_shared_level
!= level
)
2380 print_private_accesses(gen
, domain
, gen
->write
, "write", level
);
2381 print_shared_accesses(gen
, domain
, gen
->write
, "write", level
);
2384 isl_set_free(domain
);
2387 /* Use CLooG to generate code for the outer gen->shared_first loops
2388 * of the local schedule "sched".
2389 * The pretty printing of this code is handled by gpu_print_host_stmt,
2390 * which calls print_kernel_user for each iteration of the shared tile loops.
2392 static void print_cloog_kernel_body(struct cuda_gen
*gen
,
2393 __isl_keep isl_set
*context
, __isl_keep isl_union_map
*sched
)
2396 CloogOptions
*options
;
2397 CloogDomain
*cloog_context
;
2398 CloogUnionDomain
*ud
;
2400 struct clast_stmt
*stmt
;
2403 sched
= isl_union_map_copy(sched
);
2404 sched
= isl_union_map_align_params(sched
, isl_set_get_space(context
));
2406 options
= cloog_options_malloc(gen
->state
);
2407 options
->language
= CLOOG_LANGUAGE_C
;
2408 options
->strides
= 1;
2410 options
->stop
= gen
->shared_len
;
2411 options
->f
= gen
->tiled_len
;
2412 options
->l
= gen
->tiled_len
;
2413 options
->save_domains
= 1;
2414 options
->noscalars
= 1;
2416 ud
= cloog_union_domain_from_isl_union_map(sched
);
2417 for (i
= 0; i
< gen
->shared_len
; ++i
) {
2418 snprintf(name
, sizeof(name
), "g%d", i
);
2419 ud
= cloog_union_domain_set_name(ud
, CLOOG_SCAT
, i
, name
);
2421 cloog_context
= cloog_domain_from_isl_set(isl_set_copy(context
));
2422 input
= cloog_input_alloc(cloog_context
, ud
);
2424 stmt
= cloog_clast_create_from_input(input
, options
);
2426 gen
->kernel_code
.indent
= 4;
2427 gen
->kernel_code
.dst
= gen
->cuda
.kernel_c
;
2428 gen
->kernel_code
.print_user_stmt
= NULL
;
2429 gen
->kernel_code
.print_user_stmt_list
= &print_kernel_user
;
2430 gen
->kernel_code
.print_for_head
= &print_kernel_for_head
;
2431 gen
->kernel_code
.print_for_foot
= &print_kernel_for_foot
;
2432 gen
->kernel_code
.user
= gen
;
2433 gpu_print_host_stmt(&gen
->kernel_code
, stmt
);
2435 cloog_clast_free(stmt
);
2436 cloog_options_free(options
);
2439 static void print_kernel_iterators(struct cuda_gen
*gen
)
2442 const char *block_dims
[] = { "blockIdx.x", "blockIdx.y" };
2443 const char *thread_dims
[] = { "threadIdx.x", "threadIdx.y",
2446 if (gen
->n_grid
> 0) {
2447 print_indent(gen
->cuda
.kernel_c
, 4);
2448 fprintf(gen
->cuda
.kernel_c
, "int ");
2449 for (i
= 0; i
< gen
->n_grid
; ++i
) {
2451 fprintf(gen
->cuda
.kernel_c
, ", ");
2452 fprintf(gen
->cuda
.kernel_c
, "b%d = %s",
2453 i
, block_dims
[gen
->n_grid
- 1 - i
]);
2455 fprintf(gen
->cuda
.kernel_c
, ";\n");
2458 if (gen
->n_block
> 0) {
2459 print_indent(gen
->cuda
.kernel_c
, 4);
2460 fprintf(gen
->cuda
.kernel_c
, "int ");
2461 for (i
= 0; i
< gen
->n_block
; ++i
) {
2463 fprintf(gen
->cuda
.kernel_c
, ", ");
2464 fprintf(gen
->cuda
.kernel_c
, "t%d = %s",
2465 i
, thread_dims
[gen
->n_block
- 1 - i
]);
2467 fprintf(gen
->cuda
.kernel_c
, ";\n");
2471 static void print_group_shared_array(struct cuda_gen
*gen
,
2472 struct cuda_array_ref_group
*group
)
2475 struct cuda_array_bound
*bounds
;
2477 bounds
= group
->private_bound
;
2479 bounds
= group
->shared_bound
;
2483 print_indent(gen
->cuda
.kernel_c
, 4);
2484 fprintf(gen
->cuda
.kernel_c
, "%s%s ",
2485 group
->private_bound
? "" : "__shared__ ", group
->array
->type
);
2486 print_array_name(gen
->cuda
.kernel_c
, group
);
2487 for (j
= 0; j
< group
->array
->n_index
; ++j
) {
2488 fprintf(gen
->cuda
.kernel_c
, "[");
2489 isl_int_print(gen
->cuda
.kernel_c
, bounds
[j
].size
, 0);
2490 fprintf(gen
->cuda
.kernel_c
, "]");
2492 fprintf(gen
->cuda
.kernel_c
, ";\n");
2495 static void print_shared_arrays(struct cuda_gen
*gen
)
2499 for (i
= 0; i
< gen
->n_array
; ++i
) {
2500 struct cuda_array_info
*array
= &gen
->array
[i
];
2502 for (j
= 0; j
< array
->n_group
; ++j
)
2503 print_group_shared_array(gen
, array
->groups
[j
]);
2507 static void print_kernel_body(struct cuda_gen
*gen
,
2508 __isl_keep isl_set
*host_domain
, __isl_keep isl_union_map
*sched
)
2512 context
= isl_set_copy(host_domain
);
2513 context
= parametrize(context
, 0, gen
->tile_first
, "h");
2514 context
= isl_set_project_out(context
, isl_dim_set
, 0, gen
->tile_first
);
2515 context
= add_bounded_parameters(context
,
2516 gen
->n_grid
, gen
->grid_dim
, "b");
2518 print_kernel_iterators(gen
);
2519 print_shared_arrays(gen
);
2521 fprintf(gen
->cuda
.kernel_c
, "\n");
2523 print_cloog_kernel_body(gen
, context
, sched
);
2525 isl_set_free(context
);
2528 /* Given a constraint
2530 * a(p,i) + j = g f(e)
2532 * or -a(p,i) - j = g f(e) if sign < 0,
2533 * store a(p,i) in bound->shift and g (stride) in bound->stride.
2534 * a(p,i) is assumed to be an expression in only the parameters.
2536 static void extract_stride(__isl_keep isl_constraint
*c
,
2537 struct cuda_array_bound
*bound
, isl_int stride
, int sign
)
2545 isl_int_set(bound
->stride
, stride
);
2547 dim
= isl_constraint_get_space(c
);
2548 dim
= isl_space_params(dim
);
2550 nparam
= isl_space_dim(dim
, isl_dim_param
);
2554 isl_constraint_get_constant(c
, &v
);
2557 aff
= isl_aff_zero_on_domain(isl_local_space_from_space(dim
));
2558 aff
= isl_aff_set_constant(aff
, v
);
2560 for (i
= 0; i
< nparam
; ++i
) {
2561 isl_constraint_get_coefficient(c
, isl_dim_param
, i
, &v
);
2562 if (isl_int_is_zero(v
))
2566 aff
= isl_aff_add_coefficient(aff
, isl_dim_param
, i
, v
);
2574 /* Given an equality constraint of a map with a single output dimension j,
2575 * check if the constraint is of the form
2577 * a(p,i) + j = g f(e)
2579 * with a(p,i) an expression in the parameters and input dimensions
2580 * and f(e) an expression in the existentially quantified variables.
2581 * If so, and if g is larger than any such g from a previously considered
2582 * constraint, then call extract_stride. to record the stride information
2585 static int check_stride_constraint(__isl_take isl_constraint
*c
, void *user
)
2590 struct cuda_array_bound
*bound
= user
;
2593 isl_int_init(stride
);
2595 n_div
= isl_constraint_dim(c
, isl_dim_div
);
2596 isl_constraint_get_coefficient(c
, isl_dim_out
, 0, &v
);
2598 if (n_div
&& (isl_int_is_one(v
) || isl_int_is_negone(v
))) {
2599 int s
= isl_int_sgn(v
);
2600 isl_int_set_si(stride
, 0);
2601 for (i
= 0; i
< n_div
; ++i
) {
2602 isl_constraint_get_coefficient(c
, isl_dim_div
, i
, &v
);
2603 isl_int_gcd(stride
, stride
, v
);
2605 if (!isl_int_is_zero(stride
) &&
2606 isl_int_gt(stride
, bound
->stride
))
2607 extract_stride(c
, bound
, stride
, s
);
2610 isl_int_clear(stride
);
2613 isl_constraint_free(c
);
2617 /* Given contraints on an array index i, check if we can find
2618 * a shift a(p) and a stride g such that
2620 * a(p) + i = 0 mod g
2622 * If so, record the information in bound and apply the mapping
2623 * i -> (i + a(p))/g to the array index in bounds and return
2624 * the new constraints.
2625 * If not, simply return the original constraints.
2627 static __isl_give isl_basic_map
*check_stride(struct cuda_gen
*gen
,
2628 struct cuda_array_bound
*bound
, __isl_take isl_basic_map
*bounds
)
2631 isl_basic_map
*shift
;
2634 isl_int_set_si(bound
->stride
, -1);
2636 aff
= isl_basic_map_affine_hull(isl_basic_map_copy(bounds
));
2638 isl_basic_map_foreach_constraint(aff
, &check_stride_constraint
, bound
);
2640 isl_basic_map_free(aff
);
2642 if (isl_int_is_neg(bound
->stride
))
2645 aff_shift
= isl_aff_copy(bound
->shift
);
2646 aff_shift
= isl_aff_add_dims(aff_shift
, isl_dim_in
, 1);
2647 aff_shift
= isl_aff_add_coefficient_si(aff_shift
, isl_dim_in
, 0, 1);
2648 aff_shift
= isl_aff_scale_down(aff_shift
, bound
->stride
);
2649 shift
= isl_basic_map_from_aff(aff_shift
);
2651 bound
->shift_map
= isl_basic_map_copy(shift
);
2652 bounds
= isl_basic_map_apply_range(bounds
, shift
);
2657 struct cuda_size_info
{
2658 isl_basic_set
*bset
;
2659 struct cuda_array_bound
*bound
;
2663 /* Given a constraint from the basic set describing the bounds on
2664 * an array index, check if it is a lower bound, say m i >= b(x), and,
2665 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
2666 * upper bound. If so, and if this bound is smaller than any bound
2667 * derived from earlier constraints, set the size to this bound on
2668 * the expression and the lower bound to ceil(b(x)/m).
2670 static int compute_size_in_direction(__isl_take isl_constraint
*c
, void *user
)
2672 struct cuda_size_info
*size
= user
;
2677 nparam
= isl_basic_set_dim(size
->bset
, isl_dim_param
);
2678 n_div
= isl_constraint_dim(c
, isl_dim_div
);
2680 if (isl_constraint_involves_dims(c
, isl_dim_div
, 0, n_div
)) {
2681 isl_constraint_free(c
);
2687 isl_constraint_get_coefficient(c
, isl_dim_set
, size
->pos
, &v
);
2689 if (isl_int_is_pos(v
)) {
2692 enum isl_lp_result res
;
2694 aff
= isl_constraint_get_bound(c
, isl_dim_set
, size
->pos
);
2695 aff
= isl_aff_ceil(aff
);
2697 lb
= isl_aff_copy(aff
);
2699 aff
= isl_aff_neg(aff
);
2700 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, size
->pos
, 1);
2702 res
= isl_basic_set_max(size
->bset
, aff
, &v
);
2705 if (res
== isl_lp_ok
) {
2706 isl_int_add_ui(v
, v
, 1);
2707 if (isl_int_is_neg(size
->bound
->size
) ||
2708 isl_int_lt(v
, size
->bound
->size
)) {
2709 isl_int_set(size
->bound
->size
, v
);
2710 lb
= isl_aff_drop_dims(lb
, isl_dim_in
,
2712 isl_aff_free(size
->bound
->lb
);
2713 size
->bound
->lb
= isl_aff_copy(lb
);
2720 isl_constraint_free(c
);
2725 /* Given a basic map "bounds" that maps parameters and input dimensions
2726 * to a single output dimension, look for an expression in the parameters
2727 * and input dimensions such that the range of the output dimension shifted
2728 * by this expression is a constant.
2730 * In particular, we currently only consider lower bounds on the output
2731 * dimension as candidate expressions.
2733 static int compute_array_dim_size(struct cuda_gen
*gen
,
2734 struct cuda_array_bound
*bound
, __isl_take isl_basic_map
*bounds
)
2736 struct cuda_size_info size
;
2738 bounds
= isl_basic_map_detect_equalities(bounds
);
2739 bounds
= check_stride(gen
, bound
, bounds
);
2741 isl_int_set_si(bound
->size
, -1);
2745 size
.pos
= isl_basic_map_dim(bounds
, isl_dim_in
);
2746 size
.bset
= isl_basic_map_wrap(bounds
);
2747 size
.bset
= isl_basic_set_flatten(size
.bset
);
2748 size
.bset
= isl_set_simple_hull(isl_basic_set_compute_divs(size
.bset
));
2749 isl_basic_set_foreach_constraint(size
.bset
, &compute_size_in_direction
,
2751 isl_basic_set_free(size
.bset
);
2753 return isl_int_is_nonneg(bound
->size
) ? 0 : -1;
2756 /* Check if we can find a shared memory tile for the given array
2757 * based on the given accesses, and if so, put the results
2758 * in array->shared_bound.
2760 * We project the accesses on each index in turn and look for a parametric
2761 * offset such that the size is constant.
2763 static int can_tile_for_shared_memory(struct cuda_gen
*gen
,
2764 struct cuda_array_info
*array
, __isl_keep isl_map
*access
,
2765 struct cuda_array_bound
*bounds
)
2769 for (i
= 0; i
< array
->n_index
; ++i
) {
2771 isl_basic_map
*hull
;
2773 access_i
= isl_map_copy(access
);
2774 access_i
= isl_map_project_out(access_i
, isl_dim_out
, 0, i
);
2775 access_i
= isl_map_project_out(access_i
, isl_dim_out
,
2776 1, array
->n_index
- (i
+ 1));
2777 access_i
= isl_map_compute_divs(access_i
);
2778 hull
= isl_map_simple_hull(access_i
);
2779 if (compute_array_dim_size(gen
, &bounds
[i
], hull
) < 0)
2786 /* Construct a map with input the shared tile loops and the loops that
2787 * will be wrapped around the threads that relates these later loops
2788 * to the thread indices and the projects them out.
2790 static __isl_give isl_map
*compute_privatization(struct cuda_gen
*gen
)
2798 dim
= isl_union_map_get_space(gen
->shared_sched
);
2800 if (gen
->options
->wrap
)
2801 tiling
= wrap(isl_space_copy(dim
), gen
->shared_len
+ gen
->n_block
,
2802 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
2804 tiling
= tile(isl_space_copy(dim
), gen
->shared_len
+ gen
->n_block
,
2805 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
2809 par
= parametrization(dim
, gen
->shared_len
+ 2 * gen
->n_block
,
2810 gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
+ gen
->n_block
,
2813 priv
= isl_map_align_params(priv
, isl_set_get_space(par
));
2814 priv
= isl_map_intersect_range(priv
, par
);
2816 dim
= isl_map_get_space(priv
);
2817 dim
= isl_space_drop_dims(dim
, isl_dim_in
, 0, isl_space_dim(dim
, isl_dim_in
));
2818 dim
= isl_space_drop_dims(dim
, isl_dim_out
, 0, isl_space_dim(dim
, isl_dim_out
));
2819 proj
= projection(dim
, gen
->shared_len
+ 2 * gen
->n_block
,
2822 priv
= isl_map_apply_range(priv
, proj
);
2827 /* Construct a map from domain_dim to domain_dim that increments
2828 * the dimension at position "pos" and leaves all other dimensions
2831 static __isl_give isl_map
*next(__isl_take isl_space
*domain_dim
, int pos
)
2834 int len
= isl_space_dim(domain_dim
, isl_dim_set
);
2836 isl_basic_map
*next
;
2837 isl_local_space
*ls
;
2839 dim
= isl_space_map_from_set(domain_dim
);
2840 next
= isl_basic_map_universe(isl_space_copy(dim
));
2841 ls
= isl_local_space_from_space(dim
);
2843 for (i
= 0; i
< len
; ++i
) {
2846 c
= isl_equality_alloc(isl_local_space_copy(ls
));
2847 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, 1);
2848 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
2850 isl_constraint_set_constant_si(c
, 1);
2851 next
= isl_basic_map_add_constraint(next
, c
);
2854 isl_local_space_free(ls
);
2856 return isl_map_from_basic_map(next
);
2859 /* Check if the given access is coalesced.
2860 * That is, check whether incrementing the dimension that will get
2861 * wrapped over the last thread index results in incrementing
2862 * the last array index.
2864 * This function is only called for access relations without reuse.
2866 static int access_is_coalesced(struct cuda_gen
*gen
,
2867 __isl_keep isl_union_map
*access
)
2870 isl_map
*access_map
;
2871 isl_map
*next_thread_x
;
2872 isl_map
*next_element
;
2876 access
= isl_union_map_copy(access
);
2877 access
= isl_union_map_apply_domain(access
,
2878 isl_union_map_copy(gen
->tiled_sched
));
2879 access_map
= isl_map_from_union_map(access
);
2881 dim
= isl_map_get_space(access_map
);
2882 dim
= isl_space_domain(dim
);
2883 next_thread_x
= next(dim
, gen
->shared_len
+ gen
->n_block
- 1);
2885 dim
= isl_map_get_space(access_map
);
2886 dim
= isl_space_range(dim
);
2887 next_element
= next(dim
, isl_space_dim(dim
, isl_dim_set
) - 1);
2889 map
= isl_map_apply_domain(next_thread_x
, isl_map_copy(access_map
));
2890 map
= isl_map_apply_range(map
, access_map
);
2892 coalesced
= isl_map_is_subset(map
, next_element
);
2894 isl_map_free(next_element
);
2900 /* For the given array reference group, check whether the access is private
2901 * to the thread. That is, check that any given array element
2902 * is only accessed by a single thread.
2903 * We compute an access relation that maps the shared tile loop iterators
2904 * and the shared point loop iterators that will be wrapped over the
2905 * threads to the array elements.
2906 * We actually check that those iterators that will be wrapped
2907 * partition the array space. This check is stricter than necessary
2908 * since several iterations may be mapped onto the same thread
2909 * and then they could be allowed to access the same memory elements,
2910 * but our check does not allow this situation.
2912 * We also check that the index expression only depends on parallel
2913 * loops. That way, we can move those loops innermost and unroll them.
2914 * Again, we use a test that is stricter than necessary.
2915 * We actually check whether the index expression only depends
2916 * on the iterators that are wrapped over the threads.
2917 * These are necessarily parallel, but there may be more parallel loops.
2919 * Combining the injectivity of the first test with the single-valuedness
2920 * of the second test, we simply test for bijectivity.
2922 * If it turns out we can use registers, we compute the private memory
2923 * tile size using can_tile_for_shared_memory, after introducing a dependence
2924 * on the thread indices.
2926 * Before performing any of the above computations, we first check
2927 * if there is any reuse on the reference group. If not, we simply
2928 * return. If, moreover, the access is coalesced then we also remove
2929 * the shared memory tiling since we should just use global memory instead.
2931 static void check_private_group_access(struct cuda_gen
*gen
,
2932 struct cuda_array_ref_group
*group
)
2935 isl_union_map
*access
;
2936 int n_index
= group
->array
->n_index
;
2938 access
= group_access_relation(group
, 1, 1);
2939 if (isl_union_map_is_injective(access
)) {
2940 if (group
->shared_bound
&& access_is_coalesced(gen
, access
)) {
2941 free_bound_list(group
->shared_bound
, n_index
);
2942 group
->shared_bound
= NULL
;
2944 isl_union_map_free(access
);
2947 access
= isl_union_map_apply_domain(access
,
2948 isl_union_map_copy(gen
->shared_sched
));
2950 acc
= isl_map_from_union_map(access
);
2952 if (!isl_map_is_bijective(acc
)) {
2957 group
->private_bound
= create_bound_list(gen
->ctx
, n_index
);
2958 acc
= isl_map_align_params(acc
, isl_map_get_space(gen
->privatization
));
2959 acc
= isl_map_apply_domain(acc
, isl_map_copy(gen
->privatization
));
2960 if (!can_tile_for_shared_memory(gen
, group
->array
, acc
,
2961 group
->private_bound
)) {
2962 free_bound_list(group
->private_bound
, n_index
);
2963 group
->private_bound
= NULL
;
2969 /* Look for the last shared tile loop that affects the offset of the
2970 * shared or private tile and store the result in array->last_shared.
2972 static void set_last_shared(struct cuda_gen
*gen
,
2973 struct cuda_array_ref_group
*group
)
2976 struct cuda_array_bound
*bounds
;
2977 unsigned first_shared
= gen
->first_shared
;
2978 int n_index
= group
->array
->n_index
;
2980 bounds
= group
->private_bound
;
2982 bounds
= group
->shared_bound
;
2986 for (j
= gen
->shared_len
- 1; j
>= 0; --j
) {
2987 for (i
= 0; i
< n_index
; ++i
) {
2992 if (isl_aff_involves_dims(lb
, isl_dim_param
,
2993 first_shared
+ j
, 1))
2996 shift
= bounds
[i
].shift
;
2999 if (isl_aff_involves_dims(shift
, isl_dim_param
,
3000 first_shared
+ j
, 1))
3006 group
->array
->last_shared
= j
;
3009 /* Compute the sizes of all private arrays for the current kernel,
3010 * as well as the offsets of the private pieces in the original arrays.
3011 * If we cannot or don't want to privatize a given array group,
3012 * we use the shared memory tile sizes computed in
3013 * compute_group_shared_bound instead.
3015 * If a given Array only has a single reference group and if we have
3016 * been able to find a privated or shared tile,
3017 * we also look for the last shared tile loop that affects the offset
3018 * (and therefore the array tile) and store the result in array->last_shared.
3020 * A privatized copy of all access relations from reference groups that
3021 * are mapped to private memory is stored in gen->privatization.
3023 static void compute_private_size(struct cuda_gen
*gen
)
3026 isl_union_map
*private;
3028 if (!gen
->options
->use_private_memory
)
3031 private = isl_union_map_empty(isl_union_map_get_space(gen
->shared_sched
));
3033 for (i
= 0; i
< gen
->n_array
; ++i
) {
3034 struct cuda_array_info
*array
= &gen
->array
[i
];
3036 for (j
= 0; j
< array
->n_group
; ++j
) {
3037 check_private_group_access(gen
, array
->groups
[j
]);
3039 if (!array
->groups
[j
]->private_bound
)
3042 private = isl_union_map_union(private,
3043 group_access_relation(array
->groups
[j
], 1, 1));
3046 array
->last_shared
= gen
->shared_len
- 1;
3047 array
->print_shared_level
= -1;
3049 if (array
->n_group
!= 1)
3051 set_last_shared(gen
, array
->groups
[0]);
3054 if (isl_union_map_is_empty(private))
3055 isl_union_map_free(private);
3057 isl_union_map
*priv
;
3059 private = isl_union_map_apply_domain(private,
3060 isl_union_map_copy(gen
->shared_sched
));
3061 priv
= isl_union_map_from_map(isl_map_copy(gen
->privatization
));
3062 private = isl_union_map_apply_domain(private, priv
);
3063 gen
->private_access
= private;
3067 /* Fill up the groups array with singleton groups, i.e., one group
3068 * per reference, initializing the array, access, write and refs fields.
3069 * In particular the access field is initialized to the scheduled
3070 * access relation of the array reference.
3072 * Return the number of elements initialized, i.e., the number of
3073 * active references in the current kernel.
3075 static int populate_array_references(struct cuda_gen
*gen
,
3076 struct cuda_array_info
*array
, __isl_keep isl_union_map
*sched
,
3077 struct cuda_array_ref_group
**groups
)
3081 isl_ctx
*ctx
= isl_union_map_get_ctx(sched
);
3084 for (i
= 0; i
< array
->n_ref
; ++i
) {
3085 isl_union_map
*umap
;
3087 struct cuda_array_ref_group
*group
;
3088 struct cuda_stmt_access
*access
= array
->refs
[i
];
3090 map
= isl_map_copy(access
->access
);
3091 umap
= isl_union_map_from_map(map
);
3092 umap
= isl_union_map_apply_domain(umap
,
3093 isl_union_map_copy(sched
));
3095 if (isl_union_map_is_empty(umap
)) {
3096 isl_union_map_free(umap
);
3100 map
= isl_map_from_union_map(umap
);
3102 group
= isl_calloc_type(ctx
, struct cuda_array_ref_group
);
3104 group
->array
= array
;
3105 group
->access
= map
;
3106 group
->write
= access
->write
;
3107 group
->refs
= &array
->refs
[i
];
3109 groups
[n
++] = group
;
3115 static void free_array_ref_group(struct cuda_array_ref_group
*group
,
3120 free_bound_list(group
->shared_bound
, n_index
);
3121 free_bound_list(group
->private_bound
, n_index
);
3122 isl_map_free(group
->access
);
3127 /* If two groups have overlapping access relations and if one of them
3128 * involves a write, then merge the two groups into one.
3130 * We keep track of the grouping in "leader". leader[j] points to
3131 * an earlier group array element that belongs to the same group,
3132 * or the array element j itself if this element is the first in the group.
3134 * Return the number of group leaders.
3136 static int group_overlapping_writes(int n
,
3137 struct cuda_array_ref_group
**groups
, int *leader
)
3142 for (i
= 0; i
< n
; ++i
) {
3144 groups
[l
]->n_ref
= 1;
3145 for (j
= i
- 1; j
>= 0; --j
) {
3151 if (!groups
[l
]->write
&& !groups
[j
]->write
)
3154 map
= isl_map_intersect(isl_map_copy(groups
[l
]->access
),
3155 isl_map_copy(groups
[j
]->access
));
3156 empty
= isl_map_is_empty(map
);
3162 groups
[j
]->access
= isl_map_union(groups
[j
]->access
,
3164 groups
[j
]->write
= 1;
3165 groups
[l
]->access
= NULL
;
3166 groups
[j
]->n_ref
+= groups
[l
]->n_ref
;
3176 /* Compute the size of the shared array corresponding to the given array
3177 * array refrence group, based on the accesses from the current kernel,
3178 * as well as the offset of the shared piece in the original array.
3180 static void compute_group_shared_bound(struct cuda_gen
*gen
,
3181 struct cuda_array_info
*array
, struct cuda_array_ref_group
*group
)
3183 isl_ctx
*ctx
= isl_space_get_ctx(array
->dim
);
3185 if (!gen
->options
->use_shared_memory
)
3188 group
->shared_bound
= create_bound_list(ctx
, array
->n_index
);
3189 if (!can_tile_for_shared_memory(gen
, array
, group
->access
,
3190 group
->shared_bound
)) {
3191 free_bound_list(group
->shared_bound
, array
->n_index
);
3192 group
->shared_bound
= NULL
;
3196 /* Given an initial grouping of array references and shared memory tiles
3197 * for each group that allows for a shared memory tile, merge two groups
3198 * if both have a shared memory tile and if the merged group also has
3199 * a shared memory tile.
3201 * Return the number of group leaders after merging.
3203 static int group_common_shared_memory_tile(struct cuda_gen
*gen
,
3204 struct cuda_array_info
*array
, int n
,
3205 struct cuda_array_ref_group
**groups
, int *leader
, int n_group
)
3208 isl_ctx
*ctx
= isl_space_get_ctx(array
->dim
);
3210 for (i
= 0; n_group
> 1 && i
< n
; ++i
) {
3214 if (!groups
[i
]->shared_bound
)
3216 for (j
= i
- 1; j
>= 0; --j
) {
3219 struct cuda_array_bound
*shared_bound
;
3223 if (!groups
[j
]->shared_bound
)
3226 map
= isl_map_intersect(isl_map_copy(groups
[l
]->access
),
3227 isl_map_copy(groups
[j
]->access
));
3228 empty
= isl_map_is_empty(map
);
3234 map
= isl_map_union(isl_map_copy(groups
[l
]->access
),
3235 isl_map_copy(groups
[j
]->access
));
3236 shared_bound
= create_bound_list(ctx
, array
->n_index
);
3237 if (!can_tile_for_shared_memory(gen
, array
, map
,
3240 free_bound_list(shared_bound
, array
->n_index
);
3244 free_bound_list(groups
[j
]->shared_bound
,
3246 groups
[j
]->shared_bound
= shared_bound
;
3247 isl_map_free(groups
[j
]->access
);
3248 groups
[j
]->access
= map
;
3249 groups
[j
]->n_ref
+= groups
[l
]->n_ref
;
3258 /* Extract an array of array reference groups from the array of references
3259 * and the grouping information in "leader".
3261 * Store the results in array->n_group and array->groups.
3263 static void extract_array_groups(isl_ctx
*ctx
, struct cuda_array_info
*array
,
3264 int n
, struct cuda_array_ref_group
**groups
, int *leader
, int n_group
)
3268 for (i
= 2; i
< n
; ++i
)
3269 leader
[i
] = leader
[leader
[i
]];
3271 array
->n_group
= n_group
;
3272 array
->groups
= isl_alloc_array(ctx
, struct cuda_array_ref_group
*,
3274 assert(array
->groups
);
3277 for (i
= 0; i
< n
; ++i
) {
3279 struct cuda_stmt_access
**refs
;
3281 if (leader
[i
] != i
) {
3282 groups
[i
]->refs
= NULL
;
3283 free_array_ref_group(groups
[i
], array
->n_index
);
3287 refs
= isl_alloc_array(ctx
, struct cuda_stmt_access
*,
3291 for (k
= i
; k
< n
; ++k
)
3292 if (leader
[k
] == i
) {
3293 refs
[l
++] = *groups
[k
]->refs
;
3294 (*groups
[k
]->refs
)->group
= j
;
3297 groups
[i
]->refs
= refs
;
3299 array
->groups
[j
++] = groups
[i
];
3303 /* Group array references that should be considered together when
3304 * deciding whether to access them from private, shared or global memory.
3306 * In particular, if two array references overlap and if one of them
3307 * is a write, then the two references are grouped together.
3308 * Furthermore, if two groups admit a shared memory tile and if the
3309 * combination of the two also admits a shared memory tile, we merge
3312 * During the construction the group->refs field points to a single
3313 * array reference inside the array of array references, while
3314 * group->n_ref contains the number of element in leader that
3315 * (directly or indirectly) point to this group, provided the group
3318 static void group_array_references(struct cuda_gen
*gen
,
3319 struct cuda_array_info
*array
, __isl_keep isl_union_map
*sched
)
3323 isl_ctx
*ctx
= isl_union_map_get_ctx(sched
);
3324 struct cuda_array_ref_group
**groups
;
3327 groups
= isl_calloc_array(ctx
, struct cuda_array_ref_group
*,
3331 n
= populate_array_references(gen
, array
, sched
, groups
);
3333 leader
= isl_alloc_array(ctx
, int, n
);
3336 n_group
= group_overlapping_writes(n
, groups
, leader
);
3338 for (i
= 0; i
< n
; ++i
)
3340 compute_group_shared_bound(gen
, array
, groups
[i
]);
3342 n_group
= group_common_shared_memory_tile(gen
, array
, n
, groups
,
3345 extract_array_groups(ctx
, array
, n
, groups
, leader
, n_group
);
3351 /* Take tiled_sched, project it onto the shared tile loops and
3352 * the loops that will be wrapped over the threads,
3353 * parametrize the shared tile loops and store the result in gen->shared_sched.
3354 * The position of the first of these parameters is stored in gen->first_shared.
3355 * Also compute a projection that projects out the loops that will be
3356 * wrapped over the threads and store this projection in gen->shared_proj.
3358 static void compute_shared_sched(struct cuda_gen
*gen
)
3363 isl_union_map
*sched
;
3365 sched
= isl_union_map_copy(gen
->tiled_sched
);
3367 dim
= isl_union_map_get_space(sched
);
3368 gen
->first_shared
= isl_space_dim(dim
, isl_dim_param
);
3369 proj
= projection(dim
, gen
->tiled_len
, gen
->shared_len
+ gen
->n_block
);
3370 sched
= isl_union_map_apply_range(sched
, isl_union_map_from_map(proj
));
3372 dim
= isl_union_map_get_space(sched
);
3373 par
= parametrization(dim
, gen
->shared_len
+ gen
->n_block
,
3374 0, gen
->shared_len
, "g");
3375 sched
= isl_union_map_intersect_range(sched
,
3376 isl_union_set_from_set(par
));
3378 dim
= isl_union_map_get_space(sched
);
3379 proj
= projection(dim
, gen
->shared_len
+ gen
->n_block
, gen
->shared_len
);
3381 gen
->shared_sched
= sched
;
3382 gen
->shared_proj
= isl_union_map_from_map(proj
);
3385 /* Group references of all arrays in the program.
3387 static void group_references(struct cuda_gen
*gen
)
3390 isl_union_map
*sched
;
3392 sched
= isl_union_map_apply_range(isl_union_map_copy(gen
->shared_sched
),
3393 isl_union_map_copy(gen
->shared_proj
));
3395 for (i
= 0; i
< gen
->n_array
; ++i
)
3396 group_array_references(gen
, &gen
->array
[i
], sched
);
3398 isl_union_map_free(sched
);
3401 /* Free all array information that is local to the current kernel.
3403 static void free_local_array_info(struct cuda_gen
*gen
)
3407 for (i
= 0; i
< gen
->n_array
; ++i
) {
3408 struct cuda_array_info
*array
= &gen
->array
[i
];
3410 for (j
= 0; j
< array
->n_group
; ++j
)
3411 free_array_ref_group(array
->groups
[j
], array
->n_index
);
3412 free(array
->groups
);
3414 if (array
->n_group
== 0)
3416 for (j
= 0; j
< gen
->array
[i
].n_index
; ++j
) {
3417 isl_pw_aff_free(gen
->array
[i
].local_bound
[j
]);
3418 gen
->array
[i
].local_bound
[j
] = NULL
;
/* Print "(p0, p1, ...)" to "out"; with "parens" set, each iterator is
 * additionally wrapped in parentheses (for use inside macro bodies).
 * NOTE(review): parameter name "parens" and the separator were
 * reconstructed from fragmented text — verify.
 */
static void print_iterator_list(FILE *out, int len, const char *prefix,
	int parens)
{
	int i;

	fprintf(out, "(");
	for (i = 0; i < len; ++i) {
		if (i)
			fprintf(out, ", ");
		if (parens)
			fprintf(out, "(%s%d)", prefix, i);
		else
			fprintf(out, "%s%d", prefix, i);
	}
	fprintf(out, ")");
}
3440 /* The sizes of the arrays on the host that have been computed by
3441 * extract_array_info may depend on the parameters. Use the extra
3442 * constraints on the parameters that are valid at "host_domain"
3443 * to simplify these expressions.
3445 static void localize_bounds(struct cuda_gen
*gen
,
3446 __isl_keep isl_set
*host_domain
)
3451 context
= isl_set_copy(host_domain
);
3452 context
= isl_set_params(host_domain
);
3454 for (i
= 0; i
< gen
->n_array
; ++i
) {
3455 struct cuda_array_info
*array
= &gen
->array
[i
];
3457 if (array
->n_group
== 0)
3460 for (j
= 0; j
< array
->n_index
; ++j
) {
3463 pwaff
= isl_pw_aff_copy(array
->bound
[j
]);
3464 pwaff
= isl_pw_aff_gist(pwaff
, isl_set_copy(context
));
3465 array
->local_bound
[j
] = pwaff
;
3468 isl_set_free(context
);
3471 /* Set gen->tile_len and gen->n_parallel to those of the first statement
3472 * in the statement list u.
3473 * Because of the way the schedule is constructed, the other statements
3474 * in the list, if any, should have the same values for these properties.
3476 static void set_tile_len(struct cuda_gen
*gen
, struct clast_user_stmt
*u
)
3479 struct cuda_stmt
*stmt
;
3481 nr
= atoi(u
->statement
->name
+ 2);
3482 stmt
= &gen
->stmts
[nr
];
3484 gen
->tile_len
= stmt
->tile_len
;
3485 gen
->n_parallel
= stmt
->n_parallel
;
3488 /* This function is called for each leaf in the clast of the host code.
3489 * We first specialize the schedule to the site of the leaf, compute
3490 * the size of shared memory and then print the body of host code
3491 * and the associated kernel (through a call to print_kernel_body).
3493 static void print_host_user(struct gpucode_info
*code
,
3494 struct clast_user_stmt
*u
)
3496 struct cuda_gen
*gen
= code
->user
;
3499 isl_set
*host_domain
;
3500 isl_union_map
*access
;
3501 isl_union_map
*local_sched
;
3502 isl_union_set
*arrays
;
3504 set_tile_len(gen
, u
);
3507 host_domain
= extract_entire_host_domain(u
);
3509 local_sched
= isl_union_map_intersect_range(
3510 isl_union_map_copy(gen
->sched
),
3511 isl_union_set_from_set(extend(isl_set_copy(host_domain
),
3512 gen
->untiled_len
)));
3513 access
= isl_union_map_union(isl_union_map_copy(gen
->read
),
3514 isl_union_map_copy(gen
->write
));
3515 access
= isl_union_map_apply_domain(access
,
3516 isl_union_map_copy(local_sched
));
3517 arrays
= isl_union_map_range(access
);
3519 print_indent(code
->dst
, code
->indent
);
3520 fprintf(code
->dst
, "dim3 k%d_dimBlock", gen
->kernel_id
);
3521 print_reverse_list(code
->dst
, gen
->n_block
, gen
->block_dim
);
3522 fprintf(code
->dst
, ";\n");
3524 print_indent(code
->dst
, code
->indent
);
3525 fprintf(code
->dst
, "dim3 k%d_dimGrid", gen
->kernel_id
);
3526 print_reverse_list(code
->dst
, gen
->n_grid
, gen
->grid_dim
);
3527 fprintf(code
->dst
, ";\n");
3529 gen
->tiled_sched
= tile_schedule(gen
, local_sched
);
3530 gen
->tiled_sched
= parametrize_tiled_schedule(gen
, gen
->tiled_sched
);
3531 gen
->tiled_sched
= scale_tile_loops(gen
, gen
->tiled_sched
);
3533 gen
->local_sched
= isl_union_map_copy(gen
->tiled_sched
);
3535 dim
= isl_union_map_get_space(gen
->local_sched
);
3536 par
= parametrization(dim
, gen
->tiled_len
, 0, gen
->shared_len
, "g");
3537 gen
->local_sched
= isl_union_map_intersect_range(gen
->local_sched
,
3538 isl_union_set_from_set(par
));
3540 gen
->local_sched
= thread_tile_schedule(gen
, gen
->local_sched
);
3541 gen
->local_sched
= scale_thread_tile_loops(gen
, gen
->local_sched
);
3543 gen
->private_access
= NULL
;
3544 compute_shared_sched(gen
);
3545 gen
->privatization
= compute_privatization(gen
);
3546 group_references(gen
);
3547 compute_private_size(gen
);
3548 localize_bounds(gen
, host_domain
);
3550 gen
->local_sched
= interchange_for_unroll(gen
, gen
->local_sched
);
3552 print_kernel_launch(gen
, arrays
);
3554 fprintf(gen
->cuda
.kernel_c
, "{\n");
3556 print_kernel_body(gen
, host_domain
, gen
->tiled_sched
);
3558 fprintf(gen
->cuda
.kernel_c
, "}\n");
3560 free_local_array_info(gen
);
3561 isl_map_free(gen
->privatization
);
3562 isl_union_map_free(gen
->private_access
);
3563 isl_union_map_free(gen
->local_sched
);
3564 isl_union_map_free(gen
->tiled_sched
);
3565 isl_union_map_free(gen
->shared_sched
);
3566 isl_union_map_free(gen
->shared_proj
);
3567 isl_union_set_free(arrays
);
3568 isl_set_free(host_domain
);
3570 free(gen
->tile_size
);
3574 /* Use CLooG to generate code for the outer gen->tile_first loops
3575 * of the global schedule in gen->sched.
3576 * The pretty printing of this code is handled by gpu_print_host_stmt,
3577 * which calls print_host_user for each kernel invocation location.
3579 static void print_cloog_host_code(struct cuda_gen
*gen
)
3583 isl_union_map
*sched
;
3584 CloogOptions
*options
;
3585 CloogDomain
*cloog_context
;
3586 CloogUnionDomain
*ud
;
3588 struct clast_stmt
*stmt
;
3591 options
= cloog_options_malloc(gen
->state
);
3592 options
->language
= CLOOG_LANGUAGE_C
;
3594 options
->strides
= 1;
3595 options
->stop
= gen
->tile_first
;
3596 options
->f
= gen
->untiled_len
;
3597 options
->l
= gen
->untiled_len
;
3598 options
->save_domains
= 1;
3599 options
->noscalars
= 1;
3601 sched
= isl_union_map_copy(gen
->sched
);
3602 ud
= cloog_union_domain_from_isl_union_map(sched
);
3603 for (i
= 0; i
< options
->stop
; ++i
) {
3604 snprintf(name
, sizeof(name
), "h%d", i
);
3605 ud
= cloog_union_domain_set_name(ud
, CLOOG_SCAT
, i
, name
);
3607 context
= isl_set_copy(gen
->context
);
3608 cloog_context
= cloog_domain_from_isl_set(context
);
3609 input
= cloog_input_alloc(cloog_context
, ud
);
3611 stmt
= cloog_clast_create_from_input(input
, options
);
3613 gen
->code
.indent
= 0;
3614 gen
->code
.dst
= gen
->cuda
.host_c
;
3615 gen
->code
.print_user_stmt
= NULL
;
3616 gen
->code
.print_user_stmt_list
= &print_host_user
;
3617 gen
->code
.print_for_head
= NULL
;
3618 gen
->code
.print_for_foot
= NULL
;
3619 gen
->code
.user
= gen
;
3620 gpu_print_host_stmt(&gen
->code
, stmt
);
3622 cloog_clast_free(stmt
);
3623 cloog_options_free(options
);
3624 fprintf(gen
->cuda
.host_c
, "\n");
3627 void print_cuda_macros(struct cuda_gen
*gen
)
3629 const char *macros
=
3630 "#define cudaCheckReturn(ret) assert((ret) == cudaSuccess)\n"
3631 "#define cudaCheckKernel()"
3632 " assert(cudaGetLastError() == cudaSuccess)\n\n";
3633 fputs(macros
, gen
->cuda
.host_c
);
3636 void print_host_code(struct cuda_gen
*gen
)
3638 fprintf(gen
->cuda
.host_c
, "{\n");
3639 print_cloog_macros(gen
->cuda
.host_c
);
3640 print_cloog_macros(gen
->cuda
.kernel_c
);
3642 print_cuda_macros(gen
);
3644 declare_device_arrays(gen
);
3646 allocate_device_arrays(gen
);
3647 copy_arrays_to_device(gen
);
3650 print_cloog_host_code(gen
);
3652 copy_arrays_from_device(gen
);
3653 free_device_arrays(gen
);
3655 fprintf(gen
->cuda
.host_c
, "}\n");
3658 __isl_give isl_set
*add_context_from_str(__isl_take isl_set
*set
,
3667 ctx
= isl_set_get_ctx(set
);
3668 context
= isl_set_read_from_str(ctx
, str
);
3669 context
= isl_set_align_params(context
, isl_set_get_space(set
));
3670 set
= isl_set_intersect(set
, context
);
3675 /* Return the union of all iteration domains of the gen->stmts[i].
3677 static __isl_give isl_union_set
*extract_domain(struct cuda_gen
*gen
)
3680 isl_union_set
*domain
;
3682 domain
= isl_union_set_empty(isl_set_get_space(gen
->context
));
3683 for (i
= 0; i
< gen
->n_stmts
; ++i
) {
3686 domain_i
= isl_set_copy(gen
->stmts
[i
].domain
);
3687 domain
= isl_union_set_union(domain
,
3688 isl_union_set_from_set(domain_i
));
3694 /* Information about the outermost tilable bands in the forest of bands.
3696 * tile_len and n_parallel are only sets on band_info structures
3697 * that correspond to outermost bands. For other bands (in particular,
3698 * ancestors of the outermost bands), n_parallal is set to 0.
3700 * prefix is the (padded) schedule leading up to the outermost tilable bands.
3702 * tile_first is the number of schedule dimensions in prefix.
3704 * suffix is the schedule of the outermost tilable bands and their descendants.
3707 struct cuda_gen
*gen
;
3711 isl_union_map
*prefix
;
3712 isl_union_map
*suffix
;
3715 /* Set tile_len and n_parallel of the statement to that of
3716 * their outermost band, recorded in the band_info.
3718 static int set_stmt_tile_len(__isl_take isl_map
*map
, void *user
)
3720 struct band_info
*info
= user
;
3722 struct cuda_stmt
*stmt
;
3724 nr
= atoi(isl_map_get_tuple_name(map
, isl_dim_in
) + 2);
3725 stmt
= &info
->gen
->stmts
[nr
];
3727 stmt
->tile_len
= info
->tile_len
;
3728 stmt
->n_parallel
= info
->n_parallel
;
3735 static void list_select_outer_band(struct cuda_gen
*gen
,
3736 __isl_take isl_band_list
*list
, int pos
, struct band_info
*list_info
);
3738 /* Check if this band has any parallel loops. If so, take it as
3739 * the outermost tilable band. If not, continue looking for the
3740 * outermost tilable band in the children of the current band.
3742 static void band_select_outer_band(struct cuda_gen
*gen
,
3743 __isl_take isl_band
*band
, int pos
, struct band_info
*info
)
3745 int n
= isl_band_n_member(band
);
3748 for (n_parallel
= 0; n_parallel
< n
; ++n_parallel
)
3749 if (!isl_band_member_is_zero_distance(band
, n_parallel
))
3752 info
->n_parallel
= n_parallel
;
3755 info
->tile_first
= pos
;
3757 info
->prefix
= isl_band_get_prefix_schedule(band
);
3758 info
->suffix
= isl_union_map_flat_range_product(
3759 isl_band_get_partial_schedule(band
),
3760 isl_band_get_suffix_schedule(band
));
3761 isl_union_map_foreach_map(info
->prefix
,
3762 &set_stmt_tile_len
, info
);
3763 } else if (isl_band_has_children(band
)) {
3764 isl_band_list
*children
;
3765 children
= isl_band_get_children(band
);
3766 list_select_outer_band(gen
, children
, pos
+ n
, info
);
3769 info
->tile_first
= pos
+ n
;
3771 info
->prefix
= isl_union_map_flat_range_product(
3772 isl_band_get_prefix_schedule(band
),
3773 isl_band_get_partial_schedule(band
));
3774 info
->suffix
= isl_band_get_suffix_schedule(band
);
3775 isl_union_map_foreach_map(info
->prefix
,
3776 &set_stmt_tile_len
, info
);
3779 isl_band_free(band
);
3782 /* Comparison function that returns a non-zero value for band_infos
3783 * with different tile_len fields or different n_parallel fields.
3785 static int cmp_band(const void *p1
, const void *p2
)
3787 const struct band_info
*info1
= p1
;
3788 const struct band_info
*info2
= p2
;
3790 if (info1
->tile_len
!= info2
->tile_len
)
3791 return info1
->tile_len
- info2
->tile_len
;
3793 return info1
->n_parallel
- info2
->n_parallel
;
3796 /* Extend "umap" with coordinates with fixed value "val"
3797 * to a total length of "dst_len", assuming the original dimension is "src_len".
3799 static __isl_give isl_union_map
*extend_range(__isl_take isl_union_map
*umap
,
3800 int src_len
, int dst_len
, int val
)
3806 dim
= isl_union_map_get_space(umap
);
3807 map
= isl_map_reverse(projection(dim
, dst_len
, src_len
));
3808 for (i
= src_len
; i
< dst_len
; ++i
)
3809 map
= isl_map_fix_si(map
, isl_dim_out
, i
, val
);
3811 umap
= isl_union_map_apply_range(umap
, isl_union_map_from_map(map
));
3816 /* Group bands with the same values for tile_len and n_parallel.
3817 * The prefix schedule is then extended with a fixed coordinate that
3818 * is different for each such group.
3819 * Note that the actual values for this coordinate are not important.
3820 * The bands have already been effectively separated at a higher level
3821 * or they are independent and may be executed in parallel.
3822 * The list of band_info has been sorted before this functions is called.
3824 static void separate_bands(struct band_info
*info
, int n
)
3829 for (i
= 0; i
< n
; ++i
) {
3830 int l
= info
[i
].tile_first
;
3833 (info
[i
].tile_len
!= info
[i
- 1].tile_len
||
3834 info
[i
].n_parallel
!= info
[i
- 1].n_parallel
))
3837 info
[i
].prefix
= extend_range(info
[i
].prefix
,
3839 info
[i
].tile_first
= l
+ 1;
3843 /* Select the outermost bands in the elements of the list, align
3844 * their prefix schedules, separate bands with different values
3845 * for tile_len and/or n_parallel and then combine the resulting
3846 * prefix and suffix schedules into a single pair of prefix and
3847 * suffix schedules for the entire list.
3849 static void list_select_outer_band(struct cuda_gen
*gen
,
3850 __isl_take isl_band_list
*list
, int pos
, struct band_info
*list_info
)
3854 int n
= isl_band_list_n_band(list
);
3855 isl_ctx
*ctx
= isl_band_list_get_ctx(list
);
3856 struct band_info
*info
;
3858 isl_union_map
*prefix
;
3859 isl_union_map
*suffix
;
3862 info
= isl_calloc_array(ctx
, struct band_info
, n
);
3866 for (i
= 0; i
< n
; ++i
) {
3867 band
= isl_band_list_get_band(list
, i
);
3868 band_select_outer_band(gen
, band
, pos
, &info
[i
]);
3869 if (info
[i
].tile_first
> max_tile_first
)
3870 max_tile_first
= info
[i
].tile_first
;
3873 for (i
= 0; i
< n
; ++i
) {
3874 if (info
[i
].tile_first
== max_tile_first
)
3876 info
[i
].prefix
= extend_range(info
[i
].prefix
,
3877 info
[i
].tile_first
, max_tile_first
, 0);
3878 info
[i
].tile_first
= max_tile_first
;
3881 qsort(info
, n
, sizeof(struct band_info
), &cmp_band
);
3883 for (i
= 0; i
< n
- 1; ++i
)
3884 if (info
[i
].tile_len
!= info
[i
+ 1].tile_len
||
3885 info
[i
].n_parallel
!= info
[i
+ 1].n_parallel
)
3889 separate_bands(info
, n
);
3891 prefix
= info
[0].prefix
;
3892 suffix
= info
[0].suffix
;
3894 for (i
= 1; i
< n
; ++i
) {
3895 prefix
= isl_union_map_union(prefix
, info
[i
].prefix
);
3896 suffix
= isl_union_map_union(suffix
, info
[i
].suffix
);
3899 list_info
->tile_first
= info
[0].tile_first
;
3900 list_info
->tile_len
= -1;
3901 list_info
->prefix
= prefix
;
3902 list_info
->suffix
= suffix
;
3904 isl_band_list_free(list
);
3908 /* Set max_out to the maximal number of output dimensions over
3911 static int update_max_out(__isl_take isl_map
*map
, void *user
)
3913 int *max_out
= user
;
3914 int n_out
= isl_map_dim(map
, isl_dim_out
);
3916 if (n_out
> *max_out
)
3923 struct align_range_data
{
3928 /* Extend the dimension of the range of the given map to data->max_out and
3929 * then add the result to data->res.
3931 static int map_align_range(__isl_take isl_map
*map
, void *user
)
3933 struct align_range_data
*data
= user
;
3937 int n_out
= isl_map_dim(map
, isl_dim_out
);
3939 dim
= isl_union_map_get_space(data
->res
);
3940 proj
= isl_map_reverse(projection(dim
, data
->max_out
, n_out
));
3941 for (i
= n_out
; i
< data
->max_out
; ++i
)
3942 proj
= isl_map_fix_si(proj
, isl_dim_out
, i
, 0);
3944 map
= isl_map_apply_range(map
, proj
);
3946 data
->res
= isl_union_map_add_map(data
->res
, map
);
3951 /* Extend the ranges of the maps in the union map such they all have
3952 * the same dimension.
3954 static __isl_give isl_union_map
*align_range(__isl_take isl_union_map
*umap
)
3956 struct align_range_data data
;
3959 isl_union_map_foreach_map(umap
, &update_max_out
, &data
.max_out
);
3961 data
.res
= isl_union_map_empty(isl_union_map_get_space(umap
));
3962 isl_union_map_foreach_map(umap
, &map_align_range
, &data
);
3964 isl_union_map_free(umap
);
3968 /* Select the outermost tilable band that (by construction)
3969 * has at least one parallel loop.
3970 * The starting position of the aligned band is stored in the pair
3972 * The sizes and number of parallel loops may be different in different
3973 * parts of the band forest and are therefore stored in the cuda_stmts.
3975 * Return the complete schedule, with the tilable bands aligned
3976 * at gen->tile_first and padded with zero, if needed.
3978 static __isl_give isl_union_map
*select_outer_tilable_band(struct cuda_gen
*gen
,
3979 __isl_keep isl_schedule
*schedule
)
3981 isl_band_list
*list
;
3982 struct band_info info
;
3984 gen
->n_parallel
= 0;
3987 list
= isl_schedule_get_band_forest(schedule
);
3989 list_select_outer_band(gen
, list
, 0, &info
);
3991 gen
->tile_first
= info
.tile_first
;
3992 info
.suffix
= align_range(info
.suffix
);
3994 return isl_union_map_flat_range_product(info
.prefix
, info
.suffix
);
3997 /* Set gen->untiled_len to the number of scheduling dimensions
3998 * for the schedule of the first domain.
3999 * We assume here that this number is the same for all domains.
4001 static int set_untiled_len(__isl_take isl_map
*map
, void *user
)
4003 unsigned *untiled_len
= user
;
4005 *untiled_len
= isl_map_dim(map
, isl_dim_out
);
4011 /* Compute an appropriate schedule based on the accesses in
4012 * gen->read and gen->write.
4014 * We first compute dependences and then use those to compute
4015 * a schedule that has a parallel loop in each tilable band.
4016 * Finally, we select the outermost tilable band.
4018 static void compute_schedule(struct cuda_gen
*gen
,
4019 __isl_take isl_union_map
*sched
)
4021 isl_ctx
*ctx
= isl_union_map_get_ctx(sched
);
4022 isl_union_set
*domain
;
4023 isl_union_map
*empty
;
4024 isl_union_map
*dep_raw
, *dep2
, *dep3
, *dep
;
4025 isl_union_map
*uninitialized
;
4026 isl_schedule
*schedule
;
4028 empty
= isl_union_map_empty(isl_union_map_get_space(sched
));
4030 isl_union_map_compute_flow(isl_union_map_copy(gen
->read
),
4031 isl_union_map_copy(gen
->write
), empty
,
4032 isl_union_map_copy(sched
),
4033 &dep_raw
, NULL
, &uninitialized
, NULL
);
4034 isl_union_map_compute_flow(isl_union_map_copy(gen
->write
),
4035 isl_union_map_copy(gen
->write
),
4036 isl_union_map_copy(gen
->read
),
4037 isl_union_map_copy(sched
),
4038 &dep2
, &dep3
, NULL
, NULL
);
4039 isl_union_map_free(sched
);
4041 gen
->copy_in
= isl_union_map_range(uninitialized
);
4043 dep
= isl_union_map_union(dep2
, dep3
);
4044 dep
= isl_union_map_union(dep
, dep_raw
);
4045 dep
= isl_union_map_coalesce(dep
);
4047 domain
= extract_domain(gen
);
4048 schedule
= isl_union_set_compute_schedule(isl_union_set_copy(domain
),
4049 isl_union_map_copy(dep
), dep
);
4051 sched
= select_outer_tilable_band(gen
, schedule
);
4053 isl_union_map_foreach_map(sched
, &set_untiled_len
, &gen
->untiled_len
);
4054 sched
= isl_union_map_intersect_domain(sched
, domain
);
4057 isl_schedule_free(schedule
);
4060 static struct cuda_stmt_access
**expr_extract_access(struct pet_expr
*expr
,
4061 struct cuda_stmt_access
**next_access
)
4063 struct cuda_stmt_access
*access
;
4064 isl_ctx
*ctx
= isl_map_get_ctx(expr
->acc
.access
);
4066 access
= isl_alloc_type(ctx
, struct cuda_stmt_access
);
4068 access
->next
= NULL
;
4069 access
->read
= expr
->acc
.read
;
4070 access
->write
= expr
->acc
.write
;
4071 access
->access
= isl_map_copy(expr
->acc
.access
);
4073 *next_access
= access
;
4074 next_access
= &(*next_access
)->next
;
4078 static struct cuda_stmt_access
**expr_extract_accesses(struct pet_expr
*expr
,
4079 struct cuda_stmt_access
**next_access
)
4083 for (i
= 0; i
< expr
->n_arg
; ++i
)
4084 next_access
= expr_extract_accesses(expr
->args
[i
],
4087 if (expr
->type
== pet_expr_access
)
4088 next_access
= expr_extract_access(expr
, next_access
);
4093 static void pet_stmt_extract_accesses(struct cuda_stmt
*stmt
)
4095 struct cuda_stmt_access
**next_access
= &stmt
->accesses
;
4097 stmt
->accesses
= NULL
;
4098 expr_extract_accesses(stmt
->body
, next_access
);
4101 /* Return an array of cuda_stmt representing the statements in "scop".
4103 static struct cuda_stmt
*extract_stmts(isl_ctx
*ctx
, struct pet_scop
*scop
,
4104 __isl_keep isl_set
*context
)
4107 struct cuda_stmt
*stmts
;
4109 stmts
= isl_calloc_array(ctx
, struct cuda_stmt
, scop
->n_stmt
);
4112 for (i
= 0; i
< scop
->n_stmt
; ++i
) {
4113 struct cuda_stmt
*s
= &stmts
[i
];
4115 s
->domain
= isl_set_copy(scop
->stmts
[i
]->domain
);
4116 s
->domain
= isl_set_intersect_params(s
->domain
,
4117 isl_set_copy(context
));
4118 s
->body
= scop
->stmts
[i
]->body
;
4119 pet_stmt_extract_accesses(s
);
4125 /* Replace the scop in the "input" file by equivalent code
4126 * that uses the GPU. "scop" is assumed to correspond to this scop.
4128 * We first compute a schedule that respects the dependences
4129 * of the original program and select the outermost band
4130 * of tilable dimensions that has at least one parallel loop.
4131 * We then have three blocks of dimensions
4135 * The tilable band "B" is first tiled according to "tile.sizes", resulting
4140 * For each iteration of the T loop and for each array, we compute
4141 * the array elements accessed by that iteration, construct a rectangular
4142 * box around it and shift it to the origin. The result is used
4143 * as shared memory for the array.
4145 * We then split off at most 2 parallel loops from the T loops and
4146 * at most 3 parallel loops from the P loops
4150 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
4151 * according to "grid.sizes"/"block.sizes".
4153 * H T1T T1P T2 P1T P1P P2 G
4155 * Finally, the T1P and P1P iterators are equated to the block and
4156 * thread dimensions respectively and so are effectively removed.
4157 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
4158 * are run on the GPU.
4160 * Code is generated in three stages. We first generate code for the
4161 * host (the H loops), with iterators h%d. Then, for each leaf node
4162 * of the resulting AST, we generate code for the shared loops (up to
4163 * and including T2), with iterators g%d and after equating the H loops
4164 * to h%d parameters and the T1P loops to the block dimensions.
4165 * Finally, we generate code for the remaining loops in a similar fashion.
4167 int cuda_pet(isl_ctx
*ctx
, struct pet_scop
*scop
, struct ppcg_options
*options
,
4170 isl_union_map
*sched
;
4171 struct cuda_gen gen
;
4176 scop
= pet_scop_align_params(scop
);
4179 gen
.context
= isl_set_copy(scop
->context
);
4180 gen
.context
= add_context_from_str(gen
.context
, options
->ctx
);
4181 gen
.n_stmts
= scop
->n_stmt
;
4182 gen
.stmts
= extract_stmts(ctx
, scop
, gen
.context
);
4183 gen
.read
= pet_scop_collect_reads(scop
);
4184 gen
.write
= pet_scop_collect_writes(scop
);
4185 gen
.options
= options
;
4186 gen
.state
= cloog_isl_state_malloc(gen
.ctx
);
4189 cuda_open_files(&gen
.cuda
, input
);
4191 collect_array_info(&gen
);
4193 sched
= pet_scop_collect_schedule(scop
);
4195 compute_schedule(&gen
, sched
);
4197 print_host_code(&gen
);
4199 cloog_state_free(gen
.state
);
4200 clear_cuda_gen(&gen
);
4202 cuda_close_files(&gen
.cuda
);