/*
 * Copyright 2010-2011 INRIA Saclay
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * 91893 Orsay, France
 */

#include <isl/polynomial.h>
#include <isl/union_set.h>
#include <isl/schedule.h>
#include <isl/options.h>
#include <cloog/isl/cloog.h>

#include "cuda_common.h"
#include "clast_printer.h"
#include "ppcg_options.h"
/* The fields stride, shift and shift_map only contain valid information
 * if shift != NULL.
 * If so, they express that the current index is such that if you add shift,
 * then the result is always a multiple of stride.
 * shift_map contains the mapping
 *
 *	i -> (i + shift)/stride
 */
struct cuda_array_bound {
	isl_int size;
	isl_aff *lb;

	isl_int stride;
	isl_aff *shift;
	isl_basic_map *shift_map;
};

struct cuda_array_info;
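
/* As an illustration, if an index i only takes the values 1, 5, 9, ...,
 * then shift = 3 and stride = 4, since i + 3 is then always a multiple
 * of 4, and shift_map maps i to (i + 3)/4.
 */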
/* A group of array references in a kernel that should be handled together.
 * If private_bound is not NULL, then it is mapped to registers.
 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
 * Otherwise, it is accessed from global memory.
 */
struct cuda_array_ref_group {
	/* The references in this group access this array. */
	struct cuda_array_info *array;
	/* Position of this group in the list of reference groups of array. */
	int nr;

	/* The following fields are used during the construction of the groups.
	 * access is the combined access relation relative to the shared
	 * memory tiling.
	 * write is set if any access in the group is a write.
	 */
	isl_map *access;
	int write;

	/* For each index, size and offset of piece in shared memory. */
	struct cuda_array_bound *shared_bound;

	/* For each index, size and offset of piece in private memory. */
	struct cuda_array_bound *private_bound;

	/* References in this group; point to elements of a linked list. */
	int n_ref;
	struct cuda_stmt_access **refs;

	/* Last shared memory tile dimension that affects tile of this group. */
	int last_shared;
	/* Dimension at which copying to/from shared memory is printed.
	 * If >= 0, then the value is >= last_shared.
	 * If -1, then the copying is done at the leaf level.
	 */
	int print_shared_level;
};
struct cuda_array_info {
	isl_space *dim;
	/* Element type. */
	char *type;
	/* Element size. */
	int size;
	/* Name of the array. */
	char *name;
	/* Number of indices. */
	int n_index;
	/* For each index, a bound on the array in that direction. */
	isl_pw_aff **bound;
	/* For each index, bound[i] specialized to the current kernel. */
	isl_pw_aff **local_bound;

	/* All references to this array; point to elements of a linked list. */
	int n_ref;
	struct cuda_stmt_access **refs;

	/* The reference groups associated to this array. */
	int n_group;
	struct cuda_array_ref_group **groups;

	/* For scalars, is this scalar read-only within the entire program? */
	int read_only;
};
/* Print the name of the local copy of a given group of array references.
 */
static void print_array_name(FILE *out, struct cuda_array_ref_group *group)
{
	int global = 0;

	if (group->private_bound)
		fprintf(out, "private_");
	else if (group->shared_bound)
		fprintf(out, "shared_");
	else
		global = 1;
	fprintf(out, "%s", group->array->name);
	if (!global && group->array->n_group > 1)
		fprintf(out, "_%d", group->nr);
}
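
/* For an array A with more than one reference group, print_array_name()
 * prints names such as "private_A_0" or "shared_A_1"; for a group that is
 * accessed directly from global memory, it simply prints "A".
 */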
/* Collect all references to the given array and store pointers to them
 * in array->refs.
 */
static void collect_references(struct cuda_gen *gen,
	struct cuda_array_info *array)
{
	int i;
	int n;

	n = 0;
	for (i = 0; i < gen->n_stmts; ++i) {
		struct cuda_stmt *stmt = &gen->stmts[i];
		struct cuda_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
						      isl_dim_out);
			if (name && !strcmp(array->name, name))
				n++;
		}
	}

	array->n_ref = n;
	array->refs = isl_alloc_array(gen->ctx, struct cuda_stmt_access *, n);
	assert(array->refs);

	n = 0;
	for (i = 0; i < gen->n_stmts; ++i) {
		struct cuda_stmt *stmt = &gen->stmts[i];
		struct cuda_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
						      isl_dim_out);
			if (!name || strcmp(array->name, name))
				continue;

			array->refs[n++] = access;
		}
	}
}
static struct cuda_array_bound *create_bound_list(isl_ctx *ctx, int n_index)
{
	int i;
	struct cuda_array_bound *bound;

	bound = isl_alloc_array(ctx, struct cuda_array_bound, n_index);
	assert(bound);

	for (i = 0; i < n_index; ++i) {
		isl_int_init(bound[i].size);
		bound[i].lb = NULL;
		isl_int_init(bound[i].stride);
		bound[i].shift = NULL;
		bound[i].shift_map = NULL;
	}

	return bound;
}
static void free_bound_list(struct cuda_array_bound *bound, int n_index)
{
	int j;

	if (!bound)
		return;

	for (j = 0; j < n_index; ++j) {
		isl_int_clear(bound[j].size);
		isl_int_clear(bound[j].stride);
		isl_aff_free(bound[j].lb);
		isl_aff_free(bound[j].shift);
		isl_basic_map_free(bound[j].shift_map);
	}
	free(bound);
}
static struct pet_array *find_array(struct pet_scop *scop,
	__isl_keep isl_set *accessed)
{
	int i;
	isl_id *id;

	id = isl_set_get_tuple_id(accessed);

	for (i = 0; i < scop->n_array; ++i) {
		isl_id *id_i;

		id_i = isl_set_get_tuple_id(scop->arrays[i]->extent);
		isl_id_free(id_i);
		if (id == id_i)
			break;
	}
	isl_id_free(id);

	return i < scop->n_array ? scop->arrays[i] : NULL;
}
/* Compute bounds on the host arrays based on the accessed elements
 * and collect all references to the array.
 *
 * If the array is zero-dimensional, i.e., a scalar, we check
 * whether it is read-only.
 */
static int extract_array_info(__isl_take isl_set *array, void *user)
{
	int i;
	struct cuda_gen *gen = (struct cuda_gen *)user;
	const char *name;
	int n_index;
	isl_pw_aff **bounds;
	isl_pw_aff **local_bounds;
	struct pet_array *pa;

	n_index = isl_set_dim(array, isl_dim_set);
	name = isl_set_get_tuple_name(array);
	bounds = isl_alloc_array(isl_set_get_ctx(array),
				 isl_pw_aff *, n_index);
	assert(bounds);
	local_bounds = isl_calloc_array(isl_set_get_ctx(array),
				 isl_pw_aff *, n_index);
	assert(local_bounds);
	gen->array[gen->n_array].dim = isl_set_get_space(array);
	gen->array[gen->n_array].name = strdup(name);
	gen->array[gen->n_array].n_index = n_index;
	gen->array[gen->n_array].bound = bounds;
	gen->array[gen->n_array].local_bound = local_bounds;

	pa = find_array(gen->scop, array);
	assert(pa);

	gen->array[gen->n_array].type = strdup(pa->element_type);
	gen->array[gen->n_array].size = pa->element_size;

	if (n_index == 0) {
		isl_set *space;
		isl_union_map *write;
		int empty;

		write = isl_union_map_copy(gen->write);
		space = isl_set_universe(isl_set_get_space(array));
		write = isl_union_map_intersect_range(write,
				isl_union_set_from_set(space));
		empty = isl_union_map_is_empty(write);
		isl_union_map_free(write);

		gen->array[gen->n_array].read_only = empty;
	}

	for (i = 0; i < n_index; ++i) {
		isl_set *dom;
		isl_local_space *ls;
		isl_aff *one;
		isl_pw_aff *bound;
		isl_set *size = i == 0 ? array : pa->extent;

		bound = isl_set_dim_max(isl_set_copy(size), i);
		assert(bound);
		dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
		ls = isl_local_space_from_space(isl_set_get_space(dom));
		one = isl_aff_zero_on_domain(ls);
		one = isl_aff_add_constant_si(one, 1);
		bound = isl_pw_aff_add(bound, isl_pw_aff_alloc(dom, one));
		bound = isl_pw_aff_gist(bound, isl_set_copy(gen->context));

		bounds[i] = bound;
	}

	collect_references(gen, &gen->array[gen->n_array]);

	gen->n_array++;

	isl_set_free(array);
	return 0;
}
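
/* Note that extract_array_info computes each bound as one more than the
 * maximal value of the corresponding index, so an extent 0 <= i <= N - 1
 * results in the bound N.
 */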
void collect_array_info(struct cuda_gen *gen)
{
	isl_union_set *arrays;

	arrays = isl_union_map_range(isl_union_map_copy(gen->read));
	arrays = isl_union_set_union(arrays,
		isl_union_map_range(isl_union_map_copy(gen->write)));
	arrays = isl_union_set_coalesce(arrays);

	gen->n_array = isl_union_set_n_set(arrays);
	gen->array = isl_alloc_array(gen->ctx,
				     struct cuda_array_info, gen->n_array);
	assert(gen->array);
	gen->n_array = 0;
	isl_union_set_foreach_set(arrays, &extract_array_info, gen);
	isl_union_set_free(arrays);
}
static void free_array_info(struct cuda_gen *gen)
{
	int i, j;

	for (i = 0; i < gen->n_array; ++i) {
		int n_index = gen->array[i].n_index;
		free(gen->array[i].type);
		free(gen->array[i].name);
		for (j = 0; j < n_index; ++j) {
			isl_pw_aff_free(gen->array[i].bound[j]);
			isl_pw_aff_free(gen->array[i].local_bound[j]);
		}
		isl_space_free(gen->array[i].dim);
		free(gen->array[i].bound);
		free(gen->array[i].local_bound);
		free(gen->array[i].refs);
	}
	free(gen->array);
}
/* Check if a cuda array is a scalar.  A scalar is a value that is not stored
 * as an array or through a pointer reference, but as a single data element.
 * At the moment, scalars are represented as zero-dimensional arrays.
 */
static int cuda_array_is_scalar(struct cuda_array_info *array)
{
	return (array->n_index == 0);
}
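
/* For example, a program variable declared as "float beta;" is represented
 * by pet as a zero-dimensional array and is therefore treated as a scalar.
 */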
/* Is "array" a read-only scalar?
 */
static int cuda_array_is_read_only_scalar(struct cuda_array_info *array)
{
	return cuda_array_is_scalar(array) && array->read_only;
}
static void declare_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		if (cuda_array_is_read_only_scalar(&gen->array[i]))
			continue;
		fprintf(gen->cuda.host_c, "%s *dev_%s;\n",
			gen->array[i].type, gen->array[i].name);
	}
	fprintf(gen->cuda.host_c, "\n");
}
static void print_array_size(struct cuda_gen *gen, FILE *out,
	struct cuda_array_info *array)
{
	int i;
	isl_printer *prn;

	prn = isl_printer_to_file(gen->ctx, out);
	prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
	for (i = 0; i < array->n_index; ++i) {
		prn = isl_printer_print_str(prn, "(");
		prn = isl_printer_print_pw_aff(prn, array->bound[i]);
		prn = isl_printer_print_str(prn, ") * ");
	}
	prn = isl_printer_print_str(prn, "sizeof(");
	prn = isl_printer_print_str(prn, array->type);
	prn = isl_printer_print_str(prn, ")");
	isl_printer_free(prn);
}
static void allocate_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		if (cuda_array_is_read_only_scalar(&gen->array[i]))
			continue;
		fprintf(gen->cuda.host_c,
			"cudaCheckReturn(cudaMalloc((void **) &dev_%s, ",
			gen->array[i].name);
		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, "));\n");
	}
	fprintf(gen->cuda.host_c, "\n");
}
static void free_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		if (cuda_array_is_read_only_scalar(&gen->array[i]))
			continue;
		fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaFree(dev_%s));\n",
			gen->array[i].name);
	}
}
424 static void copy_arrays_to_device(struct cuda_gen
*gen
)
428 for (i
= 0; i
< gen
->n_array
; ++i
) {
433 if (cuda_array_is_read_only_scalar(&gen
->array
[i
]))
436 dim
= isl_space_copy(gen
->array
[i
].dim
);
437 read_i
= isl_union_set_extract_set(gen
->copy_in
, dim
);
438 empty
= isl_set_fast_is_empty(read_i
);
439 isl_set_free(read_i
);
443 fprintf(gen
->cuda
.host_c
, "cudaCheckReturn(cudaMemcpy(dev_%s,",
446 if (cuda_array_is_scalar(&(gen
->array
[i
])))
447 fprintf(gen
->cuda
.host_c
, " &%s, ",
450 fprintf(gen
->cuda
.host_c
, " %s, ", gen
->array
[i
].name
);
452 print_array_size(gen
, gen
->cuda
.host_c
, &gen
->array
[i
]);
453 fprintf(gen
->cuda
.host_c
, ", cudaMemcpyHostToDevice));\n");
455 fprintf(gen
->cuda
.host_c
, "\n");
458 static void copy_arrays_from_device(struct cuda_gen
*gen
)
461 isl_union_set
*write
;
462 write
= isl_union_map_range(isl_union_map_copy(gen
->write
));
464 for (i
= 0; i
< gen
->n_array
; ++i
) {
469 dim
= isl_space_copy(gen
->array
[i
].dim
);
470 write_i
= isl_union_set_extract_set(write
, dim
);
471 empty
= isl_set_fast_is_empty(write_i
);
472 isl_set_free(write_i
);
476 fprintf(gen
->cuda
.host_c
, "cudaCheckReturn(cudaMemcpy(");
477 if (cuda_array_is_scalar(&gen
->array
[i
]))
478 fprintf(gen
->cuda
.host_c
, "&%s, ", gen
->array
[i
].name
);
480 fprintf(gen
->cuda
.host_c
, "%s, ", gen
->array
[i
].name
);
481 fprintf(gen
->cuda
.host_c
, "dev_%s, ", gen
->array
[i
].name
);
482 print_array_size(gen
, gen
->cuda
.host_c
, &gen
->array
[i
]);
483 fprintf(gen
->cuda
.host_c
, ", cudaMemcpyDeviceToHost));\n");
486 isl_union_set_free(write
);
487 fprintf(gen
->cuda
.host_c
, "\n");
490 static void read_sizes_from_file(struct cuda_gen
*gen
, const char *filename
,
496 file
= fopen(filename
, "r");
500 for (i
= 0; i
< len
; ++i
)
501 if (fscanf(file
, "%d", &sizes
[i
]) < 1)
/* Internal data structure for extract_size_of_type.
 * "type" specifies the name of the space that we want to extract.
 * "res" is used to store the subset of that space.
 */
struct ppcg_extract_size_data {
	const char *type;
	isl_set *res;
};

/* This function is called for each set in a union_set.
 * If the name of the set matches data->type, we store the
 * set in data->res.
 */
static int extract_size_of_type(__isl_take isl_set *size, void *user)
{
	struct ppcg_extract_size_data *data = user;
	const char *name;

	name = isl_set_get_tuple_name(size);
	if (name && !strcmp(name, data->type)) {
		data->res = size;
		return -1;
	}

	isl_set_free(size);
	return 0;
}
535 /* Given a union map { kernel[i] -> *[...] },
536 * return the range in the space called "type" for the kernel with
537 * sequence number "id".
539 static __isl_give isl_set
*extract_sizes(__isl_keep isl_union_map
*sizes
,
540 const char *type
, int id
)
544 isl_union_set
*local_sizes
;
545 struct ppcg_extract_size_data data
= { type
, NULL
};
550 space
= isl_union_map_get_space(sizes
);
551 space
= isl_space_set_from_params(space
);
552 space
= isl_space_add_dims(space
, isl_dim_set
, 1);
553 space
= isl_space_set_tuple_name(space
, isl_dim_set
, "kernel");
554 dom
= isl_set_universe(space
);
555 dom
= isl_set_fix_si(dom
, isl_dim_set
, 0, id
);
557 local_sizes
= isl_union_set_apply(isl_union_set_from_set(dom
),
558 isl_union_map_copy(sizes
));
559 isl_union_set_foreach_set(local_sizes
, &extract_size_of_type
, &data
);
560 isl_union_set_free(local_sizes
);
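
/* For example, a "sizes" specification of the form
 *
 *	{ kernel[0] -> tile[32, 32]; kernel[0] -> block[16, 16] }
 *
 * provides per-kernel sizes; extract_sizes(sizes, "tile", 0) then
 * returns the set { tile[32, 32] }.
 */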
564 /* Given a singleton set, extract the first (at most *len) elements
565 * of the single integer tuple into *sizes and update *len if needed.
567 static void read_sizes_from_set(__isl_take isl_set
*set
, int *sizes
, int *len
)
576 dim
= isl_set_dim(set
, isl_dim_set
);
582 for (i
= 0; i
< *len
; ++i
) {
585 ok
= isl_set_plain_is_fixed(set
, isl_dim_set
, i
, &v
);
588 sizes
[i
] = isl_int_get_si(v
);
596 /* Extract user specified "tile" sizes from the "sizes" command line option,
597 * defaulting to option->tile_size in each dimension.
599 static void read_tile_sizes(struct cuda_gen
*gen
)
604 gen
->tile_size
= isl_alloc_array(gen
->ctx
, int, gen
->tile_len
);
605 assert(gen
->tile_size
);
606 for (n
= 0; n
< gen
->tile_len
; ++n
)
607 gen
->tile_size
[n
] = gen
->options
->tile_size
;
609 size
= extract_sizes(gen
->sizes
, "tile", gen
->kernel_id
);
610 read_sizes_from_set(size
, gen
->tile_size
, &gen
->tile_len
);
612 if (gen
->n_parallel
> gen
->tile_len
)
613 gen
->n_parallel
= gen
->tile_len
;
616 /* Extract user specified "block" sizes from the "sizes" command line option,
617 * after filling in some potentially useful defaults.
619 static void read_block_sizes(struct cuda_gen
*gen
)
625 gen
->n_block
= (n
<= 3) ? n
: 3;
626 switch (gen
->n_block
) {
628 gen
->block_dim
[0] = 512;
631 gen
->block_dim
[0] = 32;
632 gen
->block_dim
[1] = 16;
635 gen
->block_dim
[0] = 32;
636 gen
->block_dim
[1] = 4;
637 gen
->block_dim
[2] = 4;
641 size
= extract_sizes(gen
->sizes
, "block", gen
->kernel_id
);
642 read_sizes_from_set(size
, gen
->block_dim
, &gen
->n_block
);
645 /* Extract user specified "grid" sizes from the "sizes" command line option,
646 * after filling in some potentially useful defaults.
648 static void read_grid_sizes(struct cuda_gen
*gen
)
650 int n
= gen
->n_parallel
;
653 gen
->n_grid
= (n
<= 2) ? n
: 2;
654 switch (gen
->n_grid
) {
656 gen
->grid_dim
[0] = 32768;
659 gen
->grid_dim
[0] = 256;
660 gen
->grid_dim
[1] = 256;
664 size
= extract_sizes(gen
->sizes
, "grid", gen
->kernel_id
);
665 read_sizes_from_set(size
, gen
->grid_dim
, &gen
->n_grid
);
/* Extract user specified sizes from the "sizes" command line option
 * after filling in some potentially useful defaults.
 */
static void read_sizes(struct cuda_gen *gen)
{
	read_tile_sizes(gen);
	read_block_sizes(gen);
	read_grid_sizes(gen);
}
static void free_stmts(struct cuda_stmt *stmts, int n)
{
	int i;

	for (i = 0; i < n; ++i) {
		struct cuda_stmt_access *access, *next;

		for (access = stmts[i].accesses; access; access = next) {
			next = access->next;
			isl_map_free(access->access);
			free(access);
		}

		isl_set_free(stmts[i].domain);
	}
	free(stmts);
}
void clear_cuda_gen(struct cuda_gen *gen)
{
	free_stmts(gen->stmts, gen->n_stmts);
	free_array_info(gen);
	isl_union_map_free(gen->sizes);
	isl_set_free(gen->context);
	isl_union_set_free(gen->copy_in);
	isl_union_map_free(gen->sched);
	isl_union_map_free(gen->read);
	isl_union_map_free(gen->write);
}
708 static void print_reverse_list(FILE *out
, int len
, int *list
)
716 for (i
= 0; i
< len
; ++i
) {
719 fprintf(out
, "%d", list
[len
- 1 - i
]);
724 static void print_kernel_launch(struct cuda_gen
*gen
,
725 __isl_keep isl_union_set
*arrays
)
732 print_indent(gen
->code
.dst
, gen
->code
.indent
);
733 fprintf(gen
->code
.dst
, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
734 gen
->kernel_id
, gen
->kernel_id
, gen
->kernel_id
);
735 fprintf(gen
->cuda
.kernel_c
, "__global__ void kernel%d(",
737 fprintf(gen
->cuda
.kernel_h
, "__global__ void kernel%d(",
740 for (i
= 0; i
< gen
->n_array
; ++i
) {
745 dim
= isl_space_copy(gen
->array
[i
].dim
);
746 arr
= isl_union_set_extract_set(arrays
, dim
);
747 empty
= isl_set_fast_is_empty(arr
);
753 fprintf(gen
->code
.dst
, ", ");
754 fprintf(gen
->cuda
.kernel_c
, ", ");
755 fprintf(gen
->cuda
.kernel_h
, ", ");
758 if (cuda_array_is_read_only_scalar(&gen
->array
[i
])) {
759 fprintf(gen
->code
.dst
, "%s", gen
->array
[i
].name
);
760 fprintf(gen
->cuda
.kernel_c
, "%s %s",
761 gen
->array
[i
].type
, gen
->array
[i
].name
);
762 fprintf(gen
->cuda
.kernel_h
, "%s %s",
763 gen
->array
[i
].type
, gen
->array
[i
].name
);
765 fprintf(gen
->code
.dst
, "dev_%s", gen
->array
[i
].name
);
766 fprintf(gen
->cuda
.kernel_c
, "%s *%s",
767 gen
->array
[i
].type
, gen
->array
[i
].name
);
768 fprintf(gen
->cuda
.kernel_h
, "%s *%s",
769 gen
->array
[i
].type
, gen
->array
[i
].name
);
775 dim
= isl_union_set_get_space(arrays
);
776 nparam
= isl_space_dim(dim
, isl_dim_param
);
777 for (i
= 0; i
< nparam
; ++i
) {
778 const char *name
= isl_space_get_dim_name(dim
, isl_dim_param
, i
);
780 fprintf(gen
->code
.dst
, ", ");
781 fprintf(gen
->cuda
.kernel_c
, ", ");
782 fprintf(gen
->cuda
.kernel_h
, ", ");
784 fprintf(gen
->code
.dst
, "%s", name
);
785 fprintf(gen
->cuda
.kernel_c
, "int %s", name
);
786 fprintf(gen
->cuda
.kernel_h
, "int %s", name
);
791 for (i
= 0; i
< gen
->tile_first
; ++i
) {
793 fprintf(gen
->code
.dst
, ", ");
794 fprintf(gen
->cuda
.kernel_c
, ", ");
795 fprintf(gen
->cuda
.kernel_h
, ", ");
797 fprintf(gen
->code
.dst
, "h%d", i
);
798 fprintf(gen
->cuda
.kernel_c
, "int h%d", i
);
799 fprintf(gen
->cuda
.kernel_h
, "int h%d", i
);
803 fprintf(gen
->code
.dst
, ");\n");
804 fprintf(gen
->cuda
.kernel_c
, ")\n");
805 fprintf(gen
->cuda
.kernel_h
, ");\n");
807 fprintf(gen
->code
.dst
, "cudaCheckKernel();\n");
810 /* Construct a map from a domain of dimensionality "len"
811 * to a domain of dimensionality "len" + "tile_len" that tiles
812 * the "tile_len" coordinates starting at "first".
813 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
814 * "dim" prescribes the parameters.
816 static __isl_give isl_map
*tile(__isl_take isl_space
*dim
, int len
,
817 int first
, int tile_len
, int *tile_size
)
827 dim
= isl_space_add_dims(dim
, isl_dim_in
, len
);
828 dim
= isl_space_add_dims(dim
, isl_dim_out
, len
+ tile_len
);
829 bmap
= isl_basic_map_universe(isl_space_copy(dim
));
830 ls
= isl_local_space_from_space(dim
);
832 for (i
= 0; i
< len
- tile_len
; ++i
) {
833 int j
= i
< first
? i
: i
+ tile_len
;
834 int k
= i
< first
? i
: i
+ 2 * tile_len
;
836 c
= isl_equality_alloc(isl_local_space_copy(ls
));
837 isl_int_set_si(v
, -1);
838 isl_constraint_set_coefficient(c
, isl_dim_in
, j
, v
);
839 isl_int_set_si(v
, 1);
840 isl_constraint_set_coefficient(c
, isl_dim_out
, k
, v
);
841 bmap
= isl_basic_map_add_constraint(bmap
, c
);
844 for (i
= 0; i
< tile_len
; ++i
) {
845 c
= isl_equality_alloc(isl_local_space_copy(ls
));
846 isl_int_set_si(v
, -1);
847 isl_constraint_set_coefficient(c
, isl_dim_in
, first
+ i
, v
);
848 isl_int_set_si(v
, tile_size
[i
]);
849 isl_constraint_set_coefficient(c
, isl_dim_out
, first
+ i
, v
);
850 isl_int_set_si(v
, 1);
851 isl_constraint_set_coefficient(c
, isl_dim_out
,
852 first
+ i
+ tile_len
, v
);
853 bmap
= isl_basic_map_add_constraint(bmap
, c
);
855 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
856 isl_int_set_si(v
, 1);
857 isl_constraint_set_coefficient(c
, isl_dim_out
,
858 first
+ i
+ tile_len
, v
);
859 bmap
= isl_basic_map_add_constraint(bmap
, c
);
861 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
862 isl_int_set_si(v
, -1);
863 isl_constraint_set_coefficient(c
, isl_dim_out
,
864 first
+ i
+ tile_len
, v
);
865 isl_int_set_si(v
, tile_size
[i
] - 1);
866 isl_constraint_set_constant(c
, v
);
867 bmap
= isl_basic_map_add_constraint(bmap
, c
);
870 isl_local_space_free(ls
);
873 return isl_map_from_basic_map(bmap
);
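
/* For instance, with len = 2, first = 0, tile_len = 2 and
 * tile_size = { 32, 16 }, tile() constructs the map
 *
 *	[i, j] -> [floor(i/32), floor(j/16), i % 32, j % 16]
 */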
876 /* Construct a map from a domain of dimensionality "len"
877 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
878 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
879 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
880 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
881 * that are projected out at the end.
882 * "dim" prescribes the parameters.
884 static __isl_give isl_map
*wrap(__isl_take isl_space
*dim
, int len
,
885 int first
, int wrap_len
, int *wrap_size
)
892 dim
= isl_space_add_dims(dim
, isl_dim_in
, len
);
893 dim
= isl_space_add_dims(dim
, isl_dim_out
, len
+ 2 * wrap_len
);
894 bmap
= isl_basic_map_universe(isl_space_copy(dim
));
895 ls
= isl_local_space_from_space(dim
);
897 for (i
= 0; i
< len
; ++i
) {
898 int k
= i
< first
+ wrap_len
? i
: i
+ 2 * wrap_len
;
900 c
= isl_equality_alloc(isl_local_space_copy(ls
));
901 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, -1);
902 isl_constraint_set_coefficient_si(c
, isl_dim_out
, k
, 1);
903 bmap
= isl_basic_map_add_constraint(bmap
, c
);
906 for (i
= 0; i
< wrap_len
; ++i
) {
907 c
= isl_equality_alloc(isl_local_space_copy(ls
));
908 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
910 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
911 first
+ wrap_len
+ i
, 1);
912 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
913 first
+ 2 * wrap_len
+ i
, wrap_size
[i
]);
914 bmap
= isl_basic_map_add_constraint(bmap
, c
);
916 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
917 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
918 first
+ wrap_len
+ i
, 1);
919 bmap
= isl_basic_map_add_constraint(bmap
, c
);
921 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
922 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
923 first
+ wrap_len
+ i
, -1);
924 isl_constraint_set_constant_si(c
, wrap_size
[i
] - 1);
925 bmap
= isl_basic_map_add_constraint(bmap
, c
);
928 isl_local_space_free(ls
);
930 bmap
= isl_basic_map_project_out(bmap
, isl_dim_out
,
931 first
+ 2 * wrap_len
, wrap_len
);
933 return isl_map_from_basic_map(bmap
);
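
/* For instance, with len = 1, first = 0, wrap_len = 1 and
 * wrap_size = { 16 }, wrap() constructs the map
 *
 *	[i] -> [i, i % 16]
 */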
936 /* Add "n" parameters named prefix%d.
938 static __isl_give isl_set
*add_params( __isl_take isl_set
*set
,
939 int n
, const char *prefix
)
945 nparam
= isl_set_dim(set
, isl_dim_param
);
946 set
= isl_set_add_dims(set
, isl_dim_param
, n
);
948 for (i
= 0; i
< n
; ++i
) {
949 snprintf(name
, sizeof(name
), "%s%d", prefix
, i
);
950 set
= isl_set_set_dim_name(set
, isl_dim_param
,
957 /* Equate the "n" dimensions of "set" starting at "first" to
958 * freshly created parameters named prefix%d.
960 static __isl_give isl_set
*parametrize(__isl_take isl_set
*set
,
961 int first
, int n
, const char *prefix
)
971 nparam
= isl_set_dim(set
, isl_dim_param
);
973 set
= add_params(set
, n
, prefix
);
975 dim
= isl_set_get_space(set
);
976 bset
= isl_basic_set_universe(isl_space_copy(dim
));
977 ls
= isl_local_space_from_space(dim
);
981 for (i
= 0; i
< n
; ++i
) {
982 c
= isl_equality_alloc(isl_local_space_copy(ls
));
983 isl_int_set_si(v
, -1);
984 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
985 isl_int_set_si(v
, 1);
986 isl_constraint_set_coefficient(c
, isl_dim_set
, first
+ i
, v
);
987 bset
= isl_basic_set_add_constraint(bset
, c
);
991 isl_local_space_free(ls
);
993 return isl_set_intersect(set
, isl_set_from_basic_set(bset
));
996 static __isl_give isl_set
*parametrization(__isl_take isl_space
*dim
,
997 int len
, int first
, int n
, const char *prefix
)
1001 dim
= isl_space_add_dims(dim
, isl_dim_set
, len
);
1002 set
= isl_set_universe(dim
);
1004 return parametrize(set
, first
, n
, prefix
);
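
/* For instance, parametrization(dim, 2, 0, 2, "b") constructs the set
 *
 *	[b0, b1] -> { [i, j] : i = b0 and j = b1 }
 *
 * with two freshly added parameters b0 and b1.
 */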
1007 /* Tile the B loops over the tile sizes and then tile/wrap
1008 * the T1 loops over the blocks.
1010 static __isl_give isl_union_map
*tile_schedule(struct cuda_gen
*gen
,
1011 __isl_take isl_union_map
*sched
)
1014 isl_map
*tiling
, *block_tiling
;
1016 dim
= isl_union_map_get_space(sched
);
1017 tiling
= tile(isl_space_copy(dim
), gen
->untiled_len
,
1018 gen
->tile_first
, gen
->tile_len
, gen
->tile_size
);
1020 if (gen
->options
->wrap
)
1021 block_tiling
= wrap(dim
, gen
->untiled_len
+ gen
->tile_len
,
1022 gen
->tile_first
, gen
->n_grid
, gen
->grid_dim
);
1024 block_tiling
= tile(dim
, gen
->untiled_len
+ gen
->tile_len
,
1025 gen
->tile_first
, gen
->n_grid
, gen
->grid_dim
);
1027 gen
->tiled_len
= gen
->untiled_len
+ gen
->tile_len
+ gen
->n_grid
;
1029 tiling
= isl_map_apply_range(tiling
, block_tiling
);
1031 sched
= isl_union_map_apply_range(sched
,
1032 isl_union_map_from_map(tiling
));
1034 gen
->shared_len
= gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
;
1039 static __isl_give isl_union_map
*parametrize_tiled_schedule(
1040 struct cuda_gen
*gen
, __isl_take isl_union_map
*sched
)
1045 dim
= isl_union_map_get_space(sched
);
1046 par
= parametrization(dim
, gen
->tiled_len
, 0, gen
->tile_first
, "h");
1047 sched
= isl_union_map_intersect_range(sched
,
1048 isl_union_set_from_set(par
));
1050 dim
= isl_union_map_get_space(sched
);
1051 par
= parametrization(dim
, gen
->tiled_len
,
1052 gen
->tile_first
+ gen
->n_grid
, gen
->n_grid
, "b");
1053 sched
= isl_union_map_intersect_range(sched
,
1054 isl_union_set_from_set(par
));
1059 /* Tile/wrap the P1 loops over the threads.
1061 static __isl_give isl_union_map
*thread_tile_schedule(struct cuda_gen
*gen
,
1062 __isl_take isl_union_map
*sched
)
1068 dim
= isl_union_map_get_space(sched
);
1070 if (gen
->options
->wrap
)
1071 tiling
= wrap(isl_space_copy(dim
), gen
->tiled_len
,
1072 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
1074 tiling
= tile(isl_space_copy(dim
), gen
->tiled_len
,
1075 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
1076 gen
->thread_tiled_len
= gen
->tiled_len
+ gen
->n_block
;
1078 sched
= isl_union_map_apply_range(sched
,
1079 isl_union_map_from_map(tiling
));
1081 par
= parametrization(dim
, gen
->thread_tiled_len
,
1082 gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
+ gen
->n_block
,
1084 sched
= isl_union_map_intersect_range(sched
,
1085 isl_union_set_from_set(par
));
1087 gen
->shared_len
= gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
;
1092 /* If the user asked for it, scale the shared memory tile loops
1093 * (T1T and T2) of "sched" by gen->tile_size[i].
1094 * If we are not performing "wrapping", then additionally scale the T1P
1095 * loops by gen->grid_dim[i].
1097 static __isl_give isl_union_map
*scale_tile_loops(struct cuda_gen
*gen
,
1098 __isl_take isl_union_map
*sched
)
1102 isl_basic_map
*scale
;
1104 isl_local_space
*ls
;
1106 if (!gen
->options
->scale_tile_loops
)
1109 dim
= isl_union_map_get_space(sched
);
1110 dim
= isl_space_add_dims(dim
, isl_dim_in
, gen
->tiled_len
);
1111 dim
= isl_space_add_dims(dim
, isl_dim_out
, gen
->tiled_len
);
1112 scale
= isl_basic_map_universe(isl_space_copy(dim
));
1113 ls
= isl_local_space_from_space(dim
);
1115 for (i
= 0; i
< gen
->tiled_len
; ++i
) {
1118 if (i
>= gen
->tile_first
&& i
< gen
->tile_first
+ gen
->n_grid
) {
1119 f
= gen
->tile_size
[i
- gen
->tile_first
];
1120 if (!gen
->options
->wrap
)
1121 f
*= gen
->grid_dim
[i
- gen
->tile_first
];
1122 } else if (i
>= gen
->tile_first
+ gen
->n_grid
&&
1123 i
< gen
->tile_first
+ gen
->n_grid
+ gen
->tile_len
) {
1124 f
= gen
->tile_size
[i
- (gen
->tile_first
+ gen
->n_grid
)];
1127 c
= isl_equality_alloc(isl_local_space_copy(ls
));
1128 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
1129 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
1130 scale
= isl_basic_map_add_constraint(scale
, c
);
1133 isl_local_space_free(ls
);
1135 sched
= isl_union_map_apply_range(sched
,
1136 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
1141 /* If we are not performing "wrapping" and if the user asked for it,
1142 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
1144 static __isl_give isl_union_map
*scale_thread_tile_loops(struct cuda_gen
*gen
,
1145 __isl_take isl_union_map
*sched
)
1149 isl_basic_map
*scale
;
1151 isl_local_space
*ls
;
1153 if (gen
->options
->wrap
)
1155 if (!gen
->options
->scale_tile_loops
)
1158 dim
= isl_union_map_get_space(sched
);
1159 dim
= isl_space_add_dims(dim
, isl_dim_in
, gen
->thread_tiled_len
);
1160 dim
= isl_space_add_dims(dim
, isl_dim_out
, gen
->thread_tiled_len
);
1161 scale
= isl_basic_map_universe(isl_space_copy(dim
));
1162 ls
= isl_local_space_from_space(dim
);
1164 for (i
= 0; i
< gen
->thread_tiled_len
; ++i
) {
1167 if (i
>= gen
->shared_len
&&
1168 i
< gen
->shared_len
+ gen
->n_block
)
1169 f
= gen
->block_dim
[i
- gen
->shared_len
];
1171 c
= isl_equality_alloc(isl_local_space_copy(ls
));
1172 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
1173 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
1174 scale
= isl_basic_map_add_constraint(scale
, c
);
1177 isl_local_space_free(ls
);
1179 sched
= isl_union_map_apply_range(sched
,
1180 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
1185 /* If we are not performing "wrapping" and if the user asked for it,
1186 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
1188 static __isl_give isl_union_map
*scale_access_tile_loops(struct cuda_gen
*gen
,
1189 __isl_take isl_union_map
*sched
, int len
, int first
, int n_tile
)
1193 isl_basic_map
*scale
;
1195 isl_local_space
*ls
;
1197 if (gen
->options
->wrap
)
1199 if (!gen
->options
->scale_tile_loops
)
1202 dim
= isl_union_map_get_space(sched
);
1203 dim
= isl_space_add_dims(dim
, isl_dim_in
, len
);
1204 dim
= isl_space_add_dims(dim
, isl_dim_out
, len
);
1205 scale
= isl_basic_map_universe(isl_space_copy(dim
));
1206 ls
= isl_local_space_from_space(dim
);
1208 for (i
= 0; i
< len
; ++i
) {
1211 if (i
>= first
&& i
< first
+ n_tile
)
1212 f
= gen
->block_dim
[i
- first
];
1214 c
= isl_equality_alloc(isl_local_space_copy(ls
));
1215 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
1216 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
1217 scale
= isl_basic_map_add_constraint(scale
, c
);
1220 isl_local_space_free(ls
);
1222 sched
= isl_union_map_apply_range(sched
,
1223 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
1228 /* If print_user_stmt is set, we want to print the statements ourselves,
1229 * instead of relying on the C preprocessor. If so, we need to use
1230 * the stop option so that the domains will be saved on the statement
1233 static void print_cloog_shared_body(struct cuda_gen
*gen
,
1234 __isl_keep isl_set
*context
, __isl_keep isl_union_map
*sched
, int len
,
1235 void (*print_user_stmt
)(struct clast_printer_info
*info
,
1236 struct clast_user_stmt
*s
),
1240 CloogOptions
*options
;
1241 CloogDomain
*cloog_context
;
1242 CloogUnionDomain
*ud
;
1244 struct clast_stmt
*stmt
;
1247 sched
= isl_union_map_copy(sched
);
1248 sched
= isl_union_map_align_params(sched
, isl_set_get_space(context
));
1250 options
= cloog_options_malloc(gen
->state
);
1251 options
->language
= CLOOG_LANGUAGE_C
;
1252 options
->strides
= 1;
1256 options
->override
= 1;
1257 options
->save_domains
= 1;
1258 options
->noscalars
= 1;
1259 options
->first_unroll
= first_unroll
;
1261 ud
= cloog_union_domain_from_isl_union_map(sched
);
1262 for (i
= 0; i
< len
; ++i
) {
1263 snprintf(name
, sizeof(name
), "c%d", i
);
1264 ud
= cloog_union_domain_set_name(ud
, CLOOG_SCAT
, i
, name
);
1266 cloog_context
= cloog_domain_from_isl_set(isl_set_copy(context
));
1267 input
= cloog_input_alloc(cloog_context
, ud
);
1269 stmt
= cloog_clast_create_from_input(input
, options
);
1271 gen
->stmt_code
.indent
= gen
->kernel_code
.indent
;
1272 gen
->stmt_code
.dst
= gen
->cuda
.kernel_c
;
1273 gen
->stmt_code
.print_user_stmt
= print_user_stmt
;
1274 gen
->stmt_code
.print_user_stmt_list
= NULL
;
1275 gen
->stmt_code
.print_for_head
= NULL
;
1276 gen
->stmt_code
.print_for_foot
= NULL
;
1277 gen
->stmt_code
.user
= gen
;
1278 print_clast(&gen
->stmt_code
, stmt
);
1280 cloog_clast_free(stmt
);
1281 cloog_options_free(options
);
1284 /* Add "len" parameters p[i] called prefix%d,
1285 * with bounds to 0 <= p[i] < size[i].
1287 __isl_give isl_set
*add_bounded_parameters(__isl_take isl_set
*set
,
1288 int len
, int *size
, const char *prefix
)
1294 isl_basic_set
*bset
;
1296 isl_local_space
*ls
;
1299 nparam
= isl_set_dim(set
, isl_dim_param
);
1300 set
= isl_set_add_dims(set
, isl_dim_param
, len
);
1302 for (i
= 0; i
< len
; ++i
) {
1303 snprintf(name
, sizeof(name
), "%s%d", prefix
, i
);
1304 set
= isl_set_set_dim_name(set
, isl_dim_param
,
1308 dim
= isl_set_get_space(set
);
1309 bset
= isl_basic_set_universe(isl_space_copy(dim
));
1310 ls
= isl_local_space_from_space(dim
);
1314 for (i
= 0; i
< len
; ++i
) {
1315 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
1316 isl_int_set_si(v
, 1);
1317 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
1318 bset
= isl_basic_set_add_constraint(bset
, c
);
1320 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
1321 isl_int_set_si(v
, -1);
1322 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
1323 isl_int_set_si(v
, size
[i
] - 1);
1324 isl_constraint_set_constant(c
, v
);
1325 bset
= isl_basic_set_add_constraint(bset
, c
);
1329 isl_local_space_free(ls
);
1331 return isl_set_intersect(set
, isl_set_from_basic_set(bset
));
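
/* For instance, add_bounded_parameters(set, 2, block_dim, "t") with
 * block_dim = { 32, 16 } adds parameters t0 and t1 to "set" such that
 * 0 <= t0 <= 31 and 0 <= t1 <= 15.
 */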
1334 static void print_shared_body(struct cuda_gen
*gen
,
1335 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*sched
,
1336 int len
, void (*print_user_stmt
)(struct clast_printer_info
*info
,
1337 struct clast_user_stmt
*s
),
1342 context
= isl_set_copy(shared_domain
);
1343 context
= parametrize(context
, 0, gen
->shared_len
, "g");
1344 context
= isl_set_project_out(context
, isl_dim_set
, 0, gen
->shared_len
);
1345 context
= add_bounded_parameters(context
,
1346 gen
->n_block
, gen
->block_dim
, "t");
1348 print_cloog_shared_body(gen
, context
, sched
, len
, print_user_stmt
,
1351 isl_set_free(context
);
1354 /* Given a tile of an array, construct a map that maps each element
1355 * of the tile to a copy of the tile shifted to the origin
1356 * (based on the lower bounds in group->private_bound or group->shared_bound).
1357 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
1358 * is applied to the index first.
1359 * The domain of the resulting map is "access",
1360 * while the range space is anonymous.
1362 static __isl_give isl_map
*shift_access(__isl_take isl_set
*access
,
1363 struct cuda_array_ref_group
*group
)
1367 isl_basic_set
*bset
;
1368 isl_basic_map
*bmap
;
1370 isl_basic_set
*offset
;
1371 isl_basic_map
*shift
;
1372 isl_basic_map
*pre_shift
;
1375 struct cuda_array_bound
*bounds
;
1376 int n_index
= group
->array
->n_index
;
1378 bounds
= group
->private_bound
;
1380 bounds
= group
->shared_bound
;
1382 dim
= isl_set_get_space(access
);
1383 dim
= isl_space_drop_dims(dim
, isl_dim_set
, 0, n_index
);
1384 offset
= isl_basic_set_universe(dim
);
1385 for (i
= 0; i
< n_index
; ++i
) {
1386 lb
= isl_aff_copy(bounds
[i
].lb
);
1387 bmap
= isl_basic_map_from_aff(lb
);
1388 bset
= isl_basic_map_range(bmap
);
1389 offset
= isl_basic_set_flat_product(offset
, bset
);
1391 offset
= isl_basic_set_neg(offset
);
1393 dim
= isl_space_map_from_set(isl_set_get_space(access
));
1394 shift
= isl_basic_map_identity(dim
);
1395 shift
= isl_basic_map_set_tuple_name(shift
, isl_dim_out
, NULL
);
1397 bset
= isl_basic_set_universe(isl_set_get_space(access
));
1398 bmap
= isl_basic_map_from_domain_and_range(bset
, offset
);
1400 shift
= isl_basic_map_sum(shift
, bmap
);
1402 dim
= isl_set_get_space(access
);
1403 dim
= isl_space_drop_dims(dim
, isl_dim_set
, 0, n_index
);
1404 dim
= isl_space_map_from_set(dim
);
1405 pre_shift
= isl_basic_map_universe(isl_space_copy(dim
));
1406 dim
= isl_space_add_dims(dim
, isl_dim_in
, 1);
1407 dim
= isl_space_add_dims(dim
, isl_dim_out
, 1);
1408 for (i
= 0; i
< n_index
; ++i
) {
1409 if (!bounds
[i
].shift_map
)
1410 bmap
= isl_basic_map_identity(isl_space_copy(dim
));
1412 bmap
= isl_basic_map_copy(bounds
[i
].shift_map
);
1413 pre_shift
= isl_basic_map_flat_product(pre_shift
, bmap
);
1415 isl_space_free(dim
);
1416 name
= isl_basic_map_get_tuple_name(shift
, isl_dim_in
);
1417 pre_shift
= isl_basic_map_set_tuple_name(pre_shift
, isl_dim_in
, name
);
1418 pre_shift
= isl_basic_map_set_tuple_name(pre_shift
, isl_dim_out
, name
);
1419 shift
= isl_basic_map_apply_range(pre_shift
, shift
);
1421 sched
= isl_map_from_basic_map(shift
);
1422 sched
= isl_map_intersect_domain(sched
, access
);
1427 /* Construct a schedule for iterating over all elements in the given
1428 * piece of an array. The schedule iterates over a copy of the piece
1429 * that is shifted to the origin.
1430 * We subsequently also perform the tiling/wrapping over the threads.
1432 * In particular, we tile the final iterators so that the final thread
1433 * dimension runs over the final array dimension.
1434 * However, if those final iterators have only a single iteration,
1435 * we try to tile earlier iterators instead.
1437 static __isl_give isl_union_map
*access_schedule(struct cuda_gen
*gen
,
1438 __isl_take isl_set
*access
, struct cuda_array_ref_group
*group
)
1442 isl_union_map
*usched
;
1445 unsigned nvar
= isl_set_dim(access
, isl_dim_set
);
1449 sched
= shift_access(access
, group
);
1451 n_tile
= gen
->n_block
;
1452 if (n_tile
> nvar
) {
1454 sched
= isl_map_insert_dims(sched
,
1455 isl_dim_out
, 0, n_tile
- nvar
);
1456 for (i
= 0; i
< n_tile
- nvar
; ++i
)
1457 sched
= isl_map_fix_si(sched
, isl_dim_out
, i
, 0);
1461 first
= nvar
- n_tile
;
1463 for (; first
> 0; first
--)
1464 if (!isl_map_plain_is_fixed(sched
, isl_dim_out
,
1465 first
+ n_tile
- 1, NULL
))
1468 dim
= isl_map_get_space(sched
);
1469 dim
= isl_space_params(dim
);
1470 if (gen
->options
->wrap
)
1471 tiling
= wrap(isl_space_copy(dim
), nvar
, first
,
1472 n_tile
, gen
->block_dim
);
1474 tiling
= tile(isl_space_copy(dim
), nvar
, first
,
1475 n_tile
, gen
->block_dim
);
1476 sched
= isl_map_apply_range(sched
, tiling
);
1478 par
= parametrization(dim
, nvar
+ n_tile
, first
+ n_tile
, n_tile
, "t");
1479 usched
= isl_union_map_from_map(sched
);
1480 usched
= isl_union_map_intersect_range(usched
,
1481 isl_union_set_from_set(par
));
1483 usched
= scale_access_tile_loops(gen
, usched
, nvar
+ n_tile
,
/* Print an access to the element in the global memory copy of the
 * given array that corresponds to the element described by "pma"
 * of the original array.
 * The copy in global memory has been linearized, so we need to take
 * the array size into account.
 */
1495 static void print_global_index(FILE *out
,
1496 struct cuda_array_info
*array
, __isl_keep isl_pw_multi_aff
*pma
,
1497 __isl_keep isl_set
*domain
)
1500 isl_ctx
*ctx
= isl_pw_multi_aff_get_ctx(pma
);
1503 if (cuda_array_is_scalar(array
)) {
1504 fprintf(out
, "*%s", array
->name
);
1508 fprintf(out
, "%s[", array
->name
);
1509 prn
= isl_printer_to_file(ctx
, out
);
1510 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
1511 for (i
= 0; i
+ 1 < array
->n_index
; ++i
)
1512 prn
= isl_printer_print_str(prn
, "(");
1513 for (i
= 0; i
< array
->n_index
; ++i
) {
1514 isl_pw_aff
*pa
= isl_pw_multi_aff_get_pw_aff(pma
, i
);
1515 pa
= isl_pw_aff_coalesce(pa
);
1516 pa
= isl_pw_aff_gist(pa
, isl_set_copy(domain
));
1518 prn
= isl_printer_print_str(prn
, ") * (");
1519 prn
= isl_printer_print_pw_aff(prn
,
1520 array
->local_bound
[i
]);
1521 prn
= isl_printer_print_str(prn
, ") + ");
1523 prn
= isl_printer_print_pw_aff(prn
, pa
);
1524 isl_pw_aff_free(pa
);
1526 isl_printer_free(prn
);
1530 /* Given an index expression into a tile of an array, adjust the expression
1531 * to a shift of the tile to the origin
1532 * (based on the lower bounds in array->shared_bound).
1533 * If the index is strided, then we first add
1534 * bound->shift and divide by bound->stride.
1536 static __isl_give isl_pw_aff
*shift_index(__isl_take isl_pw_aff
*pa
,
1537 struct cuda_array_info
*array
,
1538 struct cuda_array_bound
*bound
, __isl_take isl_set
*domain
)
1545 shift
= bound
->shift
;
1546 shift
= isl_aff_copy(shift
);
1547 shift
= isl_aff_project_domain_on_params(shift
);
1548 shift
= isl_aff_align_params(shift
, isl_pw_aff_get_space(pa
));
1549 tmp
= isl_pw_aff_alloc(isl_set_copy(domain
), shift
);
1550 pa
= isl_pw_aff_add(pa
, tmp
);
1551 pa
= isl_pw_aff_scale_down(pa
, bound
->stride
);
1554 lb
= isl_aff_copy(bound
->lb
);
1555 lb
= isl_aff_project_domain_on_params(lb
);
1557 lb
= isl_aff_align_params(lb
, isl_pw_aff_get_space(pa
));
1559 tmp
= isl_pw_aff_alloc(isl_set_copy(domain
), lb
);
1560 pa
= isl_pw_aff_sub(pa
, tmp
);
1561 pa
= isl_pw_aff_coalesce(pa
);
1562 pa
= isl_pw_aff_gist(pa
, domain
);
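
/* For instance, with bound->shift = 1, bound->stride = 2 and lower bound
 * lb, shift_index() rewrites an index expression a as (a + 1)/2 - lb.
 */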
1567 /* Print an access to the element in the private/shared memory copy of the
1568 * given array reference group that corresponds to the element described
1569 * by "pma" of the original array.
1570 * Since the array in private/shared memory is just a shifted copy of part
1571 * of the original array, we simply need to subtract the lower bound,
1572 * which was computed in can_tile_for_shared_memory.
1573 * If any of the indices is strided, then we first add
1574 * bounds[i].shift and divide by bounds[i].stride.
1576 static void print_local_index(FILE *out
,
1577 struct cuda_array_ref_group
*group
, struct cuda_array_bound
*bounds
,
1578 __isl_keep isl_pw_multi_aff
*pma
, __isl_keep isl_set
*domain
)
1581 isl_ctx
*ctx
= isl_pw_multi_aff_get_ctx(pma
);
1583 struct cuda_array_info
*array
= group
->array
;
1585 print_array_name(out
, group
);
1586 for (i
= 0; i
< array
->n_index
; ++i
) {
1587 isl_pw_aff
*pa
= isl_pw_multi_aff_get_pw_aff(pma
, i
);
1589 pa
= shift_index(pa
, array
, &bounds
[i
], isl_set_copy(domain
));
1592 prn
= isl_printer_to_file(ctx
, out
);
1593 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
1594 prn
= isl_printer_print_pw_aff(prn
, pa
);
1595 isl_printer_free(prn
);
1597 isl_pw_aff_free(pa
);
/* This function is called for each leaf in the clast of the code
 * for copying to or from shared/private memory.
 * The statement name is {read,write}_{shared,private}_<array>.
 *
 * The schedule iterates over the array elements, so we can use
 * the domain of copy_sched at the current scheduling position
 * as the index of the array.
 */
1609 static void print_copy_statement(struct clast_printer_info
*code
,
1610 struct clast_user_stmt
*u
)
1612 struct cuda_gen
*gen
= code
->user
;
1615 struct cuda_array_ref_group
*group
= gen
->copy_group
;
1616 struct cuda_array_bound
*bounds
= gen
->copy_bound
;
1623 isl_pw_multi_aff
*pma
;
1626 read
= !strncmp(u
->statement
->name
, "read", 4);
1628 domain
= extract_host_domain(u
);
1631 sched
= isl_map_copy(gen
->copy_sched
);
1632 sched
= isl_map_reverse(sched
);
1633 sched
= isl_map_intersect_domain(sched
, domain
);
1634 n_in
= isl_map_dim(sched
, isl_dim_in
);
1635 n_out
= isl_map_dim(sched
, isl_dim_out
);
1636 dim
= isl_map_get_space(sched
);
1637 dim
= isl_space_drop_dims(dim
, isl_dim_in
, 0, n_in
);
1638 dim
= isl_space_drop_dims(dim
, isl_dim_out
, 0, n_out
);
1639 param
= parametrization(dim
, n_in
, 0, n_in
, "c");
1640 sched
= isl_map_align_params(sched
, isl_set_get_space(param
));
1641 sched
= isl_map_intersect_domain(sched
, param
);
1642 index
= isl_map_range(sched
);
1643 domain
= isl_set_copy(index
);
1644 pma
= isl_pw_multi_aff_from_set(index
);
1645 pma
= isl_pw_multi_aff_coalesce(pma
);
1646 domain
= isl_set_params(domain
);
1648 print_indent(code
->dst
, code
->indent
);
1650 print_local_index(code
->dst
, group
, bounds
, pma
, domain
);
1651 fprintf(code
->dst
, " = ");
1652 print_global_index(code
->dst
, group
->array
, pma
, domain
);
1654 print_global_index(code
->dst
, group
->array
, pma
, domain
);
1655 fprintf(code
->dst
, " = ");
1656 print_local_index(code
->dst
, group
, bounds
, pma
, domain
);
1658 fprintf(code
->dst
, ";\n");
1660 isl_pw_multi_aff_free(pma
);
1661 isl_set_free(domain
);
1664 static void print_shared_access(struct cuda_gen
*gen
,
1665 __isl_keep isl_set
*shared_domain
, __isl_take isl_set
*access
,
1666 const char *type
, struct cuda_array_ref_group
*group
)
1668 const char *array_name
;
1671 isl_union_map
*sched
;
1672 unsigned nvar
= isl_set_dim(access
, isl_dim_set
);
1675 ctx
= isl_set_get_ctx(access
);
1676 array_name
= isl_set_get_tuple_name(access
);
1677 name
= isl_alloc_array(ctx
, char,
1678 strlen(type
) + sizeof("_shared_") + strlen(array_name
) + 20);
1679 if (group
->array
->n_group
> 1)
1680 sprintf(name
, "%s_shared_%s_%d", type
, array_name
, group
->nr
);
1682 sprintf(name
, "%s_shared_%s", type
, array_name
);
1683 access
= isl_set_set_tuple_name(access
, name
);
1686 sched
= access_schedule(gen
, access
, group
);
1688 n_tile
= gen
->n_block
;
1692 gen
->copy_sched
= isl_map_from_union_map(isl_union_map_copy(sched
));
1693 gen
->copy_group
= group
;
1694 gen
->copy_bound
= group
->shared_bound
;
1696 print_shared_body(gen
, shared_domain
, sched
, nvar
+ n_tile
,
1697 &print_copy_statement
, -1);
1699 isl_union_map_free(sched
);
1700 isl_map_free(gen
->copy_sched
);
1703 /* Return the union of all read (read = 1) and/or write (write = 1)
1704 * access relations in the group.
1706 static __isl_give isl_union_map
*group_access_relation(
1707 struct cuda_array_ref_group
*group
, int read
, int write
)
1710 isl_union_map
*access
;
1712 access
= isl_union_map_empty(isl_map_get_space(group
->access
));
1713 for (i
= 0; i
< group
->n_ref
; ++i
) {
1716 if (!((read
&& group
->refs
[i
]->read
) ||
1717 (write
&& group
->refs
[i
]->write
)))
1719 map_i
= isl_map_copy(group
->refs
[i
]->access
);
1720 access
= isl_union_map_union(access
,
1721 isl_union_map_from_map(map_i
));
1727 /* Check that none of the shared memory tiles involve any strides.
1729 static int no_strides(struct cuda_array_ref_group
*group
)
1732 int n_index
= group
->array
->n_index
;
1734 for (i
= 0; i
< n_index
; ++i
)
1735 if (group
->shared_bound
[i
].shift
)
1741 /* Return a set containing the values of the given index i
1742 * of the elements in the array tile in global memory that corresponds
1743 * to the shared memory copy.
1744 * In particular, if a is the index, we return a set with constraints
1746 * tile_offset <= a <= tile_offset + tile_size - 1
1750 * 0 <= a <= array_size - 1
1753 static __isl_give isl_set
*group_tile_dim(struct cuda_array_ref_group
*group
,
1756 isl_basic_set
*tile
;
1759 isl_local_space
*ls
;
1764 aff
= isl_aff_copy(group
->shared_bound
[i
].lb
);
1765 aff
= isl_aff_add_dims(aff
, isl_dim_in
, 1);
1766 ls
= isl_aff_get_domain_local_space(aff
);
1767 aff
= isl_aff_neg(aff
);
1768 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, 0, 1);
1769 c
= isl_inequality_from_aff(isl_aff_copy(aff
));
1770 tile
= isl_basic_set_from_constraint(c
);
1772 aff
= isl_aff_neg(aff
);
1773 aff
= isl_aff_add_constant(aff
, group
->shared_bound
[i
].size
);
1774 aff
= isl_aff_add_constant_si(aff
, -1);
1775 c
= isl_inequality_from_aff(aff
);
1776 tile
= isl_basic_set_add_constraint(tile
, c
);
1778 aff
= isl_aff_zero_on_domain(ls
);
1779 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, 0, 1);
1780 c
= isl_inequality_from_aff(aff
);
1781 tile
= isl_basic_set_add_constraint(tile
, c
);
1783 bound
= isl_pw_aff_copy(group
->array
->bound
[i
]);
1784 bound
= isl_pw_aff_add_dims(bound
, isl_dim_in
, 1);
1785 ls
= isl_local_space_from_space(isl_pw_aff_get_domain_space(bound
));
1786 aff
= isl_aff_zero_on_domain(ls
);
1787 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, 0, 1);
1788 aff
= isl_aff_add_constant_si(aff
, 1);
1789 dom
= isl_pw_aff_domain(isl_pw_aff_copy(bound
));
1791 tile_set
= isl_pw_aff_ge_set(bound
, isl_pw_aff_alloc(dom
, aff
));
1792 tile_set
= isl_set_align_params(tile_set
, isl_basic_set_get_space(tile
));
1793 tile_set
= isl_set_intersect(tile_set
, isl_set_from_basic_set(tile
));
1798 /* Return a set containing the elements in the array tile in
1799 * global memory that corresponds to the shared memory copy.
1801 static __isl_give isl_set
*group_tile(struct cuda_array_ref_group
*group
)
1804 int n_index
= group
->array
->n_index
;
1807 tile
= group_tile_dim(group
, 0);
1808 for (i
= 1; i
< n_index
; ++i
) {
1811 tile_i
= group_tile_dim(group
, i
);
1812 tile
= isl_set_flat_product(tile
, tile_i
);
1815 tile
= isl_set_set_tuple_name(tile
, group
->array
->name
);
/* Print code for reading into or writing from shared memory
 * the given array reference group.
 *
 * sched maps the original iteration domains to the shared memory tile loops.
 *
 * If we are performing a read from global memory to shared memory,
 * if the array involved is not a scalar and if the definition of the
 * shared memory tiles does not involve any strides, then we copy
 * the entire tile to shared memory.  This may result in some extra
 * elements getting copied, but it should lead to simpler code
 * (which means that fewer registers may be needed) and less divergence.
 *
 * Otherwise, we only copy the elements that will be read or have been written
 * in the kernel.
 *
 * Note that the absence of stride requirement can easily be lifted.
 * We would just need to add constraints of the form
 *
 *	shift + a = stride * alpha
 */
1840 static int print_group_shared_accesses(struct cuda_gen
*gen
,
1841 struct cuda_array_ref_group
*group
, const char *type
,
1842 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*sched
)
1845 isl_union_map
*access
;
1846 isl_union_set
*uset
;
1847 isl_set
*access_set
;
1849 if (group
->private_bound
)
1851 if (!group
->shared_bound
)
1854 read
= !strcmp(type
, "read");
1856 access
= group_access_relation(group
, read
, !read
);
1857 access
= isl_union_map_apply_domain(access
, isl_union_map_copy(sched
));
1858 uset
= isl_union_map_range(access
);
1860 if (isl_union_set_is_empty(uset
)) {
1861 isl_union_set_free(uset
);
1865 if (read
&& group
->array
->n_index
> 0 && no_strides(group
)) {
1866 isl_union_set_free(uset
);
1867 access_set
= group_tile(group
);
1868 print_shared_access(gen
, shared_domain
, access_set
,
1873 access_set
= isl_set_from_union_set(uset
);
1874 access_set
= isl_set_coalesce(access_set
);
1876 print_shared_access(gen
, shared_domain
, access_set
, type
, group
);
1881 /* Print code for reading into or writing from shared memory at
1882 * the given level (-1 for innermost).
1884 * If we are not printing at the innermost level, then the dimensionality
1885 * of shared_domain may be smaller than gen->shared_len.
1886 * As the rest of the code assumes that the domain of access has
1887 * gen->shared_len dimensions, we therefore may need to embed this domain
1888 * in a higher dimensional space after intersection with shared_domain.
1890 static void print_shared_accesses(struct cuda_gen
*gen
,
1891 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*access
,
1892 const char *type
, int level
)
1898 int shared_len
= isl_set_dim(shared_domain
, isl_dim_set
);
1900 isl_union_map
*sched
;
1902 shared_domain
= isl_set_copy(shared_domain
);
1903 sched
= isl_union_map_copy(gen
->tiled_sched
);
1904 dim
= isl_union_map_get_space(sched
);
1905 proj
= projection(dim
, gen
->tiled_len
, shared_len
);
1906 sched
= isl_union_map_apply_range(sched
, isl_union_map_from_map(proj
));
1907 sched
= isl_union_map_intersect_range(sched
,
1908 isl_union_set_from_set(isl_set_copy(shared_domain
)));
1909 if (shared_len
!= gen
->shared_len
) {
1910 dim
= isl_union_map_get_space(sched
);
1911 proj
= projection(dim
, gen
->shared_len
, shared_len
);
1912 proj
= isl_map_reverse(proj
);
1913 shared_domain
= isl_set_apply(shared_domain
,
1914 isl_map_copy(proj
));
1915 sched
= isl_union_map_apply_range(sched
,
1916 isl_union_map_from_map(proj
));
1919 dim
= isl_union_map_get_space(sched
);
1920 par
= parametrization(dim
, gen
->shared_len
, 0, gen
->shared_len
, "g");
1921 sched
= isl_union_map_intersect_range(sched
,
1922 isl_union_set_from_set(par
));
1924 for (i
= 0; i
< gen
->n_array
; ++i
) {
1925 struct cuda_array_info
*array
= &gen
->array
[i
];
1927 for (j
= 0; j
< array
->n_group
; ++j
) {
1928 if (array
->groups
[j
]->print_shared_level
!= level
)
1931 if (print_group_shared_accesses(gen
, array
->groups
[j
],
1932 type
, shared_domain
, sched
))
1937 isl_union_map_free(sched
);
1938 isl_set_free(shared_domain
);
1941 print_indent(gen
->cuda
.kernel_c
, gen
->kernel_code
.indent
);
1942 fprintf(gen
->cuda
.kernel_c
, "__syncthreads();\n");
/* This function is called for each access to an array in some statement
 * in the original code.
 * Replace that access by an access to shared or (linearized) global memory.
 * Since the array in shared memory is just
 * a shifted copy of part of the original array, we simply need
 * to subtract the lower bound, which was computed
 * in can_tile_for_shared_memory.
 * If any of the indices is strided, then we first add
 * shared_bound[i].shift and divide by shared_bound[i].stride.
 *
 * If the given array is accessed directly from global memory,
 * we don't need to perform any shifting and simply simplify
 * the expression in the context of the domain instead.
 *
 * If the array space (range of access) has no name, then we are
 * accessing an iterator in the original program.
 */
1963 static void print_access(struct cuda_gen
*gen
, __isl_take isl_map
*access
,
1969 struct cuda_array_info
*array
= NULL
;
1971 isl_pw_multi_aff
*pma
;
1974 struct cuda_array_bound
*bounds
= NULL
;
1976 access
= isl_map_align_params(access
,
1977 isl_set_get_space(gen
->stmt_domain
));
1979 data_set
= isl_set_apply(isl_set_copy(gen
->stmt_domain
), access
);
1981 name
= isl_set_get_tuple_name(data_set
);
1984 fprintf(gen
->cuda
.kernel_c
, "(");
1986 struct cuda_array_ref_group
*group
;
1988 for (i
= 0; i
< gen
->n_array
; ++i
) {
1989 if (strcmp(name
, gen
->array
[i
].name
))
1991 array
= &gen
->array
[i
];
1994 group
= array
->groups
[group_nr
];
1995 bounds
= group
->private_bound
;
1997 bounds
= group
->shared_bound
;
1999 if (!bounds
&& cuda_array_is_scalar(array
) && !array
->read_only
)
2000 fprintf(gen
->cuda
.kernel_c
, "*");
2001 print_array_name(gen
->cuda
.kernel_c
, group
);
2003 if (cuda_array_is_scalar(array
)) {
2004 isl_set_free(data_set
);
2008 fprintf(gen
->cuda
.kernel_c
, "[");
2012 n_index
= isl_set_dim(data_set
, isl_dim_set
);
2013 pma
= isl_pw_multi_aff_from_set(data_set
);
2014 pma
= isl_pw_multi_aff_coalesce(pma
);
2016 prn
= isl_printer_to_file(gen
->ctx
, gen
->cuda
.kernel_c
);
2017 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
2020 for (i
= 0; i
+ 1 < n_index
; ++i
)
2021 prn
= isl_printer_print_str(prn
, "(");
2023 for (i
= 0; i
< n_index
; ++i
) {
2026 index
= isl_pw_multi_aff_get_pw_aff(pma
, i
);
2029 prn
= isl_printer_print_pw_aff(prn
, index
);
2030 isl_pw_aff_free(index
);
2034 domain
= isl_set_copy(gen
->stmt_domain
);
2035 domain
= isl_set_params(domain
);
2037 index
= isl_pw_aff_coalesce(index
);
2038 index
= isl_pw_aff_gist(index
, domain
);
2040 index
= shift_index(index
, array
, &bounds
[i
], domain
);
2044 prn
= isl_printer_print_str(prn
, ") * (");
2045 prn
= isl_printer_print_pw_aff(prn
,
2046 array
->local_bound
[i
]);
2047 prn
= isl_printer_print_str(prn
, ") + ");
2049 prn
= isl_printer_print_str(prn
, "][");
2051 prn
= isl_printer_print_pw_aff(prn
, index
);
2052 isl_pw_aff_free(index
);
2055 prn
= isl_printer_print_str(prn
, ")");
2057 prn
= isl_printer_print_str(prn
, "]");
2058 isl_printer_free(prn
);
2060 isl_pw_multi_aff_free(pma
);
static struct cuda_stmt_access *print_expr(struct cuda_gen *gen, FILE *out,
    struct pet_expr *expr, struct cuda_stmt_access *access, int outer)
{
    int i;

    switch (expr->type) {
    case pet_expr_double:
        fprintf(out, "%g", expr->d);
        break;
    case pet_expr_access:
        print_access(gen, isl_map_copy(access->access), access->group);
        access = access->next;
        break;
    case pet_expr_unary:
        if (!outer)
            fprintf(out, "(");
        fprintf(out, " %s ", pet_op_str(expr->op));
        access = print_expr(gen, out, expr->args[pet_un_arg],
                    access, 0);
        if (!outer)
            fprintf(out, ")");
        break;
    case pet_expr_binary:
        if (!outer)
            fprintf(out, "(");
        access = print_expr(gen, out, expr->args[pet_bin_lhs],
                    access, 0);
        fprintf(out, " %s ", pet_op_str(expr->op));
        access = print_expr(gen, out, expr->args[pet_bin_rhs],
                    access, 0);
        if (!outer)
            fprintf(out, ")");
        break;
    case pet_expr_ternary:
        if (!outer)
            fprintf(out, "(");
        access = print_expr(gen, out, expr->args[pet_ter_cond],
                    access, 0);
        fprintf(out, " ? ");
        access = print_expr(gen, out, expr->args[pet_ter_true],
                    access, 0);
        fprintf(out, " : ");
        access = print_expr(gen, out, expr->args[pet_ter_false],
                    access, 0);
        if (!outer)
            fprintf(out, ")");
        break;
    case pet_expr_call:
        fprintf(out, "%s(", expr->name);
        for (i = 0; i < expr->n_arg; ++i) {
            if (i)
                fprintf(out, ", ");
            access = print_expr(gen, out, expr->args[i],
                        access, 1);
        }
        fprintf(out, ")");
    }
    return access;
}

static void print_stmt_body(struct cuda_gen *gen,
    FILE *out, struct cuda_stmt *stmt)
{
    print_expr(gen, out, stmt->body, stmt->accesses, 1);
    fprintf(out, ";\n");
}
/* This function is called for each leaf in the innermost clast,
 * i.e., for each statement.
 * We print the statement body, simplifying the accesses based
 * on the schedule.
 */
static void print_statement(struct clast_printer_info *code,
    struct clast_user_stmt *u)
{
    struct cuda_gen *gen = code->user;
    isl_space *dim;
    isl_set *par;
    isl_set *stmt_domain;
    isl_union_map *stmt_sched;
    isl_union_set *uset;
    int nr;
    struct cuda_stmt *stmt;

    nr = atoi(u->statement->name + 2);
    stmt = &gen->stmts[nr];

    stmt_domain = extract_host_domain(u);

    stmt_sched = isl_union_map_intersect_range(
            isl_union_map_copy(gen->local_sched),
            isl_union_set_from_set(extend(stmt_domain,
                        gen->thread_tiled_len)));
    dim = isl_union_map_get_space(stmt_sched);
    par = parametrization(dim, gen->thread_tiled_len, 0,
                gen->thread_tiled_len, "c");
    stmt_sched = isl_union_map_intersect_range(stmt_sched,
                isl_union_set_from_set(par));

    uset = isl_union_map_domain(stmt_sched);
    dim = isl_union_set_get_space(uset);
    dim = isl_space_add_dims(dim, isl_dim_set,
                isl_set_dim(stmt->domain, isl_dim_set));
    dim = isl_space_set_tuple_name(dim, isl_dim_set, u->statement->name);
    gen->stmt_domain = isl_union_set_extract_set(uset, dim);
    isl_union_set_free(uset);

    print_indent(code->dst, code->indent);
    print_stmt_body(gen, code->dst, stmt);

    isl_set_free(gen->stmt_domain);
}
static void print_private_access(struct cuda_gen *gen,
    __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
    const char *type, struct cuda_array_ref_group *group)
{
    const char *array_name;
    char *name;
    isl_ctx *ctx;
    unsigned nvar = isl_set_dim(access, isl_dim_set);
    isl_union_map *usched;

    if (isl_set_fast_is_empty(access)) {
        isl_set_free(access);
        return;
    }

    ctx = isl_set_get_ctx(access);
    array_name = isl_set_get_tuple_name(access);
    name = isl_alloc_array(ctx, char,
        strlen(type) + sizeof("_private_") + strlen(array_name) + 20);
    if (group->array->n_group > 1)
        sprintf(name, "%s_private_%s_%d", type, array_name, group->nr);
    else
        sprintf(name, "%s_private_%s", type, array_name);
    access = isl_set_set_tuple_name(access, name);
    free(name);

    gen->copy_sched = shift_access(access, group);
    gen->copy_group = group;
    gen->copy_bound = group->private_bound;

    usched = isl_union_map_from_map(isl_map_copy(gen->copy_sched));
    print_shared_body(gen, shared_domain, usched, nvar,
            &print_copy_statement, 1);
    isl_union_map_free(usched);

    isl_map_free(gen->copy_sched);
}

/* Print code for reading into or writing from private memory
 * the given array reference group.
 *
 * sched maps the original iteration domains to the shared memory tile loops.
 */
static void print_group_private_accesses(struct cuda_gen *gen,
    struct cuda_array_ref_group *group,
    const char *type, __isl_keep isl_set *shared_domain,
    unsigned first_shared, int shared_len, __isl_keep isl_union_map *sched)
{
    int read;
    isl_union_map *access;
    isl_union_set *uset;
    isl_set *access_set;

    if (!group->private_bound)
        return;

    read = !strcmp(type, "read");

    access = group_access_relation(group, read, !read);
    access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
    access = isl_union_map_intersect(access,
            isl_union_map_copy(gen->private_access));
    uset = isl_union_map_range(access);

    if (isl_union_set_is_empty(uset)) {
        isl_union_set_free(uset);
        return;
    }

    access_set = isl_set_from_union_set(uset);
    access_set = isl_set_coalesce(access_set);
    access_set = isl_set_eliminate(access_set, isl_dim_param,
            first_shared + shared_len,
            gen->shared_len - shared_len);

    print_private_access(gen, shared_domain, access_set, type, group);
}
/* Print code for reading into or writing from private memory at
 * the given level (-1 for innermost).
 *
 * If we are not printing at the innermost level, then the dimensionality
 * of shared_domain may be smaller than gen->shared_len.
 * As the rest of the code assumes that the domain of access has
 * gen->shared_len dimensions, we therefore may need to embed this domain
 * in a higher dimensional space after intersection with shared_domain.
 *
 * This code is very similar to print_shared_accesses.
 * The main difference is that we need to take into account
 * gen->private_access.
 */
static void print_private_accesses(struct cuda_gen *gen,
    __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
    const char *type, int level)
{
    int i, j;
    isl_space *dim;
    isl_map *proj;
    int shared_len = isl_set_dim(shared_domain, isl_dim_set);
    unsigned first_shared;
    isl_union_map *sched;

    shared_domain = isl_set_copy(shared_domain);
    sched = isl_union_map_copy(gen->tiled_sched);
    dim = isl_union_map_get_space(sched);
    first_shared = isl_space_dim(dim, isl_dim_param);
    proj = projection(dim, gen->tiled_len, shared_len);
    sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
    sched = isl_union_map_intersect_range(sched,
            isl_union_set_from_set(isl_set_copy(shared_domain)));
    if (shared_len != gen->shared_len) {
        dim = isl_union_map_get_space(sched);
        proj = projection(dim, gen->shared_len, shared_len);
        proj = isl_map_reverse(proj);
        shared_domain = isl_set_apply(shared_domain,
                isl_map_copy(proj));
        sched = isl_union_map_apply_range(sched,
                isl_union_map_from_map(proj));
    }

    for (i = 0; i < gen->n_array; ++i) {
        struct cuda_array_info *array = &gen->array[i];

        for (j = 0; j < array->n_group; ++j) {
            if (array->groups[j]->print_shared_level != level)
                continue;

            print_group_private_accesses(gen, array->groups[j],
                    type, shared_domain,
                    first_shared, shared_len, sched);
        }
    }

    isl_union_map_free(sched);
    isl_set_free(shared_domain);
}

/* Set unroll[j] if the input dimension j is involved in
 * the index expression represented by bmap.
 */
static int check_unroll(__isl_take isl_basic_map *bmap, void *user)
{
    int i, j;
    int n_in = isl_basic_map_dim(bmap, isl_dim_in);
    int n_out = isl_basic_map_dim(bmap, isl_dim_out);
    int *unroll = user;

    for (i = 0; i < n_out; ++i) {
        isl_constraint *c;
        int ok;

        ok = isl_basic_map_has_defining_equality(bmap,
                isl_dim_out, i, &c);
        assert(ok);
        for (j = 0; j < n_in; ++j)
            if (isl_constraint_involves_dims(c, isl_dim_in, j, 1))
                unroll[j] = 1;
        isl_constraint_free(c);
    }

    isl_basic_map_free(bmap);
    return 0;
}
/* Given an array pos mapping input dimensions to the corresponding
 * output dimension, construct the corresponding map.
 */
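/* For example (illustrative), for len = 2 and pos = { 1, 0 }, the
 * constructed map is { [i0, i1] -> [o0, o1] : o1 = i0 and o0 = i1 },
 * i.e., the map that swaps the two dimensions.
 */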
static __isl_give isl_map *permutation(__isl_take isl_space *dim,
    int *pos, int len)
{
    int i;
    isl_constraint *c;
    isl_basic_map *bmap;
    isl_local_space *ls;

    dim = isl_space_add_dims(dim, isl_dim_in, len);
    dim = isl_space_add_dims(dim, isl_dim_out, len);
    bmap = isl_basic_map_universe(isl_space_copy(dim));
    ls = isl_local_space_from_space(dim);

    for (i = 0; i < len; ++i) {
        c = isl_equality_alloc(isl_local_space_copy(ls));
        isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
        isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i], 1);
        bmap = isl_basic_map_add_constraint(bmap, c);
    }
    isl_local_space_free(ls);

    return isl_map_from_basic_map(bmap);
}
/* Find all loops involved in any of the index expressions for any of
 * the private accesses, move them innermost and then mark them as
 * requiring unrolling by setting gen->first_unroll.
 * The loops involved should all be parallel because of the checks
 * we performed in check_private_group_access.  Moving them innermost
 * is therefore a valid transformation.
 */
static __isl_give isl_union_map *interchange_for_unroll(struct cuda_gen *gen,
    __isl_take isl_union_map *sched)
{
    int i, j;
    int unroll[gen->thread_tiled_len];
    int perm[gen->thread_tiled_len];
    isl_space *dim;
    isl_map *permute;
    int len = gen->shared_len + gen->n_parallel + gen->n_block;

    gen->first_unroll = -1;

    for (i = 0; i < gen->thread_tiled_len; ++i)
        unroll[i] = 0;
    for (i = 0; i < gen->n_array; ++i) {
        struct cuda_array_info *array = &gen->array[i];

        for (j = 0; j < array->n_group; ++j) {
            isl_union_map *access;
            isl_map *acc;

            if (!array->groups[j]->private_bound)
                continue;

            access = group_access_relation(array->groups[j], 1, 1);
            access = isl_union_map_apply_domain(access,
                    isl_union_map_copy(sched));

            acc = isl_map_from_union_map(access);
            isl_map_foreach_basic_map(acc, &check_unroll, unroll);

            isl_map_free(acc);
        }
    }

    for (i = 0; i < gen->shared_len; ++i)
        if (unroll[i])
            return sched;

    for (i = gen->shared_len; i < len; ++i)
        if (unroll[i])
            break;

    if (i >= len)
        return sched;

    for (i = len; i < gen->thread_tiled_len; ++i)
        if (unroll[i])
            return sched;

    j = 0;
    for (i = 0; i < gen->thread_tiled_len; ++i)
        if (!unroll[i])
            perm[i] = j++;
    gen->first_unroll = 1 + j;
    for (i = 0; i < len; ++i)
        if (unroll[i])
            perm[i] = j++;

    dim = isl_union_map_get_space(sched);
    permute = permutation(dim, perm, gen->thread_tiled_len);
    sched = isl_union_map_apply_range(sched,
                isl_union_map_from_map(permute));

    return sched;
}
/* This function is called for each leaf in the clast of the kernel code.
 * We first specialize the schedule to the site of the leaf and
 * print code for reading into shared memory, performing the actual
 * computations and writing from shared memory, with the required
 * synchronizations.
 */
static void print_kernel_user(struct clast_printer_info *code,
    struct clast_user_stmt *u)
{
    struct cuda_gen *gen = code->user;
    isl_set *shared_domain;

    shared_domain = extract_entire_host_domain(&u->stmt);

    print_shared_accesses(gen, shared_domain, gen->read, "read", -1);

    print_private_accesses(gen, shared_domain, gen->read, "read", -1);

    print_shared_body(gen, shared_domain, gen->local_sched,
            gen->thread_tiled_len, &print_statement,
            gen->first_unroll);

    print_private_accesses(gen, shared_domain, gen->write, "write", -1);

    print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
    fprintf(gen->cuda.kernel_c, "__syncthreads();\n");

    print_shared_accesses(gen, shared_domain, gen->write, "write", -1);

    isl_set_free(shared_domain);
}
/* Check if we need to perform any copying to shared memory at this level
 * and, if so, print the copying instructions.
 * Any array for which we are allowed to print copying instructions at
 * this level, but haven't done so already, is printed.
 */
static void copy_to_local(struct cuda_gen *gen, __isl_keep isl_set *domain)
{
    int i, j;
    int level;
    int print = 0;

    level = isl_set_dim(domain, isl_dim_set);

    for (i = 0; i < gen->n_array; ++i) {
        struct cuda_array_info *array = &gen->array[i];

        for (j = 0; j < array->n_group; ++j) {
            if (array->groups[j]->print_shared_level >= 0)
                continue;
            if (array->groups[j]->last_shared >= level)
                continue;
            array->groups[j]->print_shared_level = level;
            print = 1;
        }
    }

    if (print) {
        print_shared_accesses(gen, domain, gen->read, "read", level);
        print_private_accesses(gen, domain, gen->read, "read", level);
    }
}

/* This function is called for each for loop in the clast,
 * right after the opening brace has been printed.
 *
 * Print copying instructions to shared or private memory if needed.
 */
static void print_kernel_for_head(struct clast_printer_info *code,
    struct clast_for *f)
{
    struct cuda_gen *gen = code->user;
    isl_set *domain;

    domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
    copy_to_local(gen, domain);

    isl_set_free(domain);
}

/* Print instructions for copying from shared memory for each array
 * for which print_kernel_for_head has added copying instructions
 * at this level.
 */
static void copy_from_local(struct cuda_gen *gen, __isl_keep isl_set *domain)
{
    int i, j;
    int level;
    int print = 0;

    level = isl_set_dim(domain, isl_dim_set);

    for (i = 0; i < gen->n_array; ++i) {
        struct cuda_array_info *array = &gen->array[i];

        for (j = 0; j < array->n_group; ++j) {
            if (array->groups[j]->print_shared_level != level)
                continue;
            print = 1;
            break;
        }
        if (print)
            break;
    }

    if (print) {
        print_private_accesses(gen, domain, gen->write, "write", level);
        print_shared_accesses(gen, domain, gen->write, "write", level);
    }
}

/* This function is called for each for loop in the clast,
 * right before the closing brace is printed.
 *
 * Print copying instructions from shared or private memory if needed.
 */
static void print_kernel_for_foot(struct clast_printer_info *code,
    struct clast_for *f)
{
    struct cuda_gen *gen = code->user;
    isl_set *domain;

    domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
    copy_from_local(gen, domain);

    isl_set_free(domain);
}
/* Use CLooG to generate code for the outer gen->shared_first loops
 * of the local schedule "sched".
 * The pretty printing of this code is handled by print_clast,
 * which calls print_kernel_user for each iteration of the shared tile loops.
 */
static void print_cloog_kernel_body(struct cuda_gen *gen,
    __isl_keep isl_set *context, __isl_keep isl_union_map *sched)
{
    int i;
    CloogOptions *options;
    CloogDomain *cloog_context;
    CloogUnionDomain *ud;
    CloogInput *input;
    struct clast_stmt *stmt;
    char name[20];

    sched = isl_union_map_copy(sched);
    sched = isl_union_map_align_params(sched, isl_set_get_space(context));

    options = cloog_options_malloc(gen->state);
    options->language = CLOOG_LANGUAGE_C;
    options->strides = 1;
    options->stop = gen->shared_len;
    options->f = gen->tiled_len;
    options->l = gen->tiled_len;
    options->save_domains = 1;
    options->noscalars = 1;

    ud = cloog_union_domain_from_isl_union_map(sched);
    for (i = 0; i < gen->shared_len; ++i) {
        snprintf(name, sizeof(name), "g%d", i);
        ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
    }
    cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
    input = cloog_input_alloc(cloog_context, ud);

    stmt = cloog_clast_create_from_input(input, options);

    gen->kernel_code.indent = 4;
    gen->kernel_code.dst = gen->cuda.kernel_c;
    gen->kernel_code.print_user_stmt = NULL;
    gen->kernel_code.print_user_stmt_list = &print_kernel_user;
    gen->kernel_code.print_for_head = &print_kernel_for_head;
    gen->kernel_code.print_for_foot = &print_kernel_for_foot;
    gen->kernel_code.user = gen;
    copy_to_local(gen, context);
    print_clast(&gen->kernel_code, stmt);
    copy_from_local(gen, context);

    cloog_clast_free(stmt);
    cloog_options_free(options);
}
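/* Print declarations for the block and thread identifiers used inside
 * the kernel, i.e., "b0", "b1", ... initialized from blockIdx.* and
 * "t0", "t1", ... initialized from threadIdx.*, with the last identifier
 * in each list corresponding to the innermost (x) dimension.
 */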
static void print_kernel_iterators(struct cuda_gen *gen)
{
    int i;
    const char *block_dims[] = { "blockIdx.x", "blockIdx.y" };
    const char *thread_dims[] = { "threadIdx.x", "threadIdx.y",
                    "threadIdx.z" };

    if (gen->n_grid > 0) {
        print_indent(gen->cuda.kernel_c, 4);
        fprintf(gen->cuda.kernel_c, "int ");
        for (i = 0; i < gen->n_grid; ++i) {
            if (i)
                fprintf(gen->cuda.kernel_c, ", ");
            fprintf(gen->cuda.kernel_c, "b%d = %s",
                i, block_dims[gen->n_grid - 1 - i]);
        }
        fprintf(gen->cuda.kernel_c, ";\n");
    }

    if (gen->n_block > 0) {
        print_indent(gen->cuda.kernel_c, 4);
        fprintf(gen->cuda.kernel_c, "int ");
        for (i = 0; i < gen->n_block; ++i) {
            if (i)
                fprintf(gen->cuda.kernel_c, ", ");
            fprintf(gen->cuda.kernel_c, "t%d = %s",
                i, thread_dims[gen->n_block - 1 - i]);
        }
        fprintf(gen->cuda.kernel_c, ";\n");
    }
}
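/* Print the declaration of the shared or private copy of the array
 * accessed by "group", using the tile sizes stored in the group's
 * private_bound or shared_bound.  Copies kept in registers are declared
 * without the __shared__ qualifier.
 */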
static void print_group_shared_array(struct cuda_gen *gen,
    struct cuda_array_ref_group *group)
{
    int j;
    struct cuda_array_bound *bounds;

    bounds = group->private_bound;
    if (!bounds)
        bounds = group->shared_bound;
    if (!bounds)
        return;

    print_indent(gen->cuda.kernel_c, 4);
    fprintf(gen->cuda.kernel_c, "%s%s ",
        group->private_bound ? "" : "__shared__ ", group->array->type);
    print_array_name(gen->cuda.kernel_c, group);
    for (j = 0; j < group->array->n_index; ++j) {
        fprintf(gen->cuda.kernel_c, "[");
        isl_int_print(gen->cuda.kernel_c, bounds[j].size, 0);
        fprintf(gen->cuda.kernel_c, "]");
    }
    fprintf(gen->cuda.kernel_c, ";\n");
}
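/* Print the declarations of all shared and private array copies
 * used by the current kernel.
 */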
static void print_shared_arrays(struct cuda_gen *gen)
{
    int i, j;

    for (i = 0; i < gen->n_array; ++i) {
        struct cuda_array_info *array = &gen->array[i];

        for (j = 0; j < array->n_group; ++j)
            print_group_shared_array(gen, array->groups[j]);
    }
}
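/* Print the body of the kernel: the declarations of the block and thread
 * identifiers and of the shared and private array copies, followed by
 * the code generated by CLooG for the kernel itself, specialized to
 * the given host domain and schedule.
 */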
static void print_kernel_body(struct cuda_gen *gen,
    __isl_keep isl_set *host_domain, __isl_keep isl_union_map *sched)
{
    isl_set *context;

    context = isl_set_copy(host_domain);
    context = parametrize(context, 0, gen->tile_first, "h");
    context = isl_set_project_out(context, isl_dim_set, 0, gen->tile_first);
    context = add_bounded_parameters(context,
                gen->n_grid, gen->grid_dim, "b");

    print_kernel_iterators(gen);
    print_shared_arrays(gen);

    fprintf(gen->cuda.kernel_c, "\n");

    print_cloog_kernel_body(gen, context, sched);

    isl_set_free(context);
}
/* Given a constraint
 *
 *	a(p,i) + j = g f(e)
 *
 * or -a(p,i) - j = g f(e) if sign < 0,
 * store a(p,i) in bound->shift and g (stride) in bound->stride.
 * a(p,i) is assumed to be an expression in only the parameters.
 */
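/* For example (illustrative), the constraint n + j = 4 e0, with n a
 * parameter and e0 existentially quantified, yields stride 4 and shift n,
 * so that j is known to satisfy j = -n mod 4.
 */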
static void extract_stride(__isl_keep isl_constraint *c,
    struct cuda_array_bound *bound, isl_int stride, int sign)
{
    int i;
    isl_int v;
    isl_space *dim;
    unsigned nparam;
    isl_aff *aff;

    isl_int_set(bound->stride, stride);

    dim = isl_constraint_get_space(c);
    dim = isl_space_params(dim);

    nparam = isl_space_dim(dim, isl_dim_param);

    isl_int_init(v);

    isl_constraint_get_constant(c, &v);
    if (sign < 0)
        isl_int_neg(v, v);
    aff = isl_aff_zero_on_domain(isl_local_space_from_space(dim));
    aff = isl_aff_set_constant(aff, v);

    for (i = 0; i < nparam; ++i) {
        isl_constraint_get_coefficient(c, isl_dim_param, i, &v);
        if (isl_int_is_zero(v))
            continue;
        if (sign < 0)
            isl_int_neg(v, v);
        aff = isl_aff_add_coefficient(aff, isl_dim_param, i, v);
    }

    isl_int_clear(v);

    bound->shift = aff;
}
/* Given an equality constraint of a map with a single output dimension j,
 * check if the constraint is of the form
 *
 *	a(p,i) + j = g f(e)
 *
 * with a(p,i) an expression in the parameters and input dimensions
 * and f(e) an expression in the existentially quantified variables.
 * If so, and if g is larger than any such g from a previously considered
 * constraint, then call extract_stride to record the stride information
 * in bound.
 */
static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
{
    int i;
    isl_int v, stride;
    unsigned n_div;
    struct cuda_array_bound *bound = user;

    isl_int_init(v);
    isl_int_init(stride);

    n_div = isl_constraint_dim(c, isl_dim_div);
    isl_constraint_get_coefficient(c, isl_dim_out, 0, &v);

    if (n_div && (isl_int_is_one(v) || isl_int_is_negone(v))) {
        int s = isl_int_sgn(v);
        isl_int_set_si(stride, 0);
        for (i = 0; i < n_div; ++i) {
            isl_constraint_get_coefficient(c, isl_dim_div, i, &v);
            isl_int_gcd(stride, stride, v);
        }
        if (!isl_int_is_zero(stride) &&
            isl_int_gt(stride, bound->stride))
            extract_stride(c, bound, stride, s);
    }

    isl_int_clear(stride);
    isl_int_clear(v);

    isl_constraint_free(c);
    return 0;
}
/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 */
static __isl_give isl_basic_map *check_stride(struct cuda_gen *gen,
    struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
{
    isl_basic_map *aff;
    isl_basic_map *shift;
    isl_aff *aff_shift;

    isl_int_set_si(bound->stride, -1);

    aff = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));

    isl_basic_map_foreach_constraint(aff, &check_stride_constraint, bound);

    isl_basic_map_free(aff);

    if (isl_int_is_neg(bound->stride))
        return bounds;

    aff_shift = isl_aff_copy(bound->shift);
    aff_shift = isl_aff_add_dims(aff_shift, isl_dim_in, 1);
    aff_shift = isl_aff_add_coefficient_si(aff_shift, isl_dim_in, 0, 1);
    aff_shift = isl_aff_scale_down(aff_shift, bound->stride);
    shift = isl_basic_map_from_aff(aff_shift);

    bound->shift_map = isl_basic_map_copy(shift);
    bounds = isl_basic_map_apply_range(bounds, shift);

    return bounds;
}

struct cuda_size_info {
    isl_basic_set *bset;
    struct cuda_array_bound *bound;
    int pos;
};
/* Given a constraint from the basic set describing the bounds on
 * an array index, check if it is a lower bound, say m i >= b(x), and,
 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
 * upper bound.  If so, and if this bound is smaller than any bound
 * derived from earlier constraints, set the size to this bound on
 * the expression and the lower bound to ceil(b(x)/m).
 */
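/* For example (illustrative), if the constraints on index i include
 * i >= 2 n and i <= 2 n + 9, then the lower bound is 2 n and
 * i - 2 n + 1 has constant upper bound 10, so the size in this
 * direction is 10.
 */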
static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
{
    struct cuda_size_info *size = user;
    unsigned nparam;
    unsigned n_div;
    isl_int v;

    nparam = isl_basic_set_dim(size->bset, isl_dim_param);
    n_div = isl_constraint_dim(c, isl_dim_div);

    if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div)) {
        isl_constraint_free(c);
        return 0;
    }

    isl_int_init(v);

    isl_constraint_get_coefficient(c, isl_dim_set, size->pos, &v);

    if (isl_int_is_pos(v)) {
        isl_aff *aff;
        isl_aff *lb;
        enum isl_lp_result res;

        aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
        aff = isl_aff_ceil(aff);

        lb = isl_aff_copy(aff);

        aff = isl_aff_neg(aff);
        aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);

        res = isl_basic_set_max(size->bset, aff, &v);
        isl_aff_free(aff);

        if (res == isl_lp_ok) {
            isl_int_add_ui(v, v, 1);
            if (isl_int_is_neg(size->bound->size) ||
                isl_int_lt(v, size->bound->size)) {
                isl_int_set(size->bound->size, v);
                lb = isl_aff_drop_dims(lb, isl_dim_in,
                            size->pos, 1);
                isl_aff_free(size->bound->lb);
                size->bound->lb = isl_aff_copy(lb);
            }
        }
        isl_aff_free(lb);
    }

    isl_int_clear(v);
    isl_constraint_free(c);

    return 0;
}
/* Given a basic map "bounds" that maps parameters and input dimensions
 * to a single output dimension, look for an expression in the parameters
 * and input dimensions such that the range of the output dimension shifted
 * by this expression is a constant.
 *
 * In particular, we currently only consider lower bounds on the output
 * dimension as candidate expressions.
 */
static int compute_array_dim_size(struct cuda_gen *gen,
    struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
{
    struct cuda_size_info size;

    bounds = isl_basic_map_detect_equalities(bounds);
    bounds = check_stride(gen, bound, bounds);

    isl_int_set_si(bound->size, -1);
    bound->lb = NULL;

    size.bound = bound;
    size.pos = isl_basic_map_dim(bounds, isl_dim_in);
    size.bset = isl_basic_map_wrap(bounds);
    size.bset = isl_basic_set_flatten(size.bset);
    size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
    isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
                    &size);
    isl_basic_set_free(size.bset);

    return isl_int_is_nonneg(bound->size) ? 0 : -1;
}
/* Check if we can find a shared memory tile for the given array
 * based on the given accesses, and if so, put the results
 * in array->shared_bound.
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant.
 */
static int can_tile_for_shared_memory(struct cuda_gen *gen,
    struct cuda_array_info *array, __isl_keep isl_map *access,
    struct cuda_array_bound *bounds)
{
    int i;

    for (i = 0; i < array->n_index; ++i) {
        isl_map *access_i;
        isl_basic_map *hull;

        access_i = isl_map_copy(access);
        access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
        access_i = isl_map_project_out(access_i, isl_dim_out,
                        1, array->n_index - (i + 1));
        access_i = isl_map_compute_divs(access_i);
        hull = isl_map_simple_hull(access_i);
        if (compute_array_dim_size(gen, &bounds[i], hull) < 0)
            return 0;
    }

    return 1;
}
/* Construct a map with input the shared tile loops and the loops that
 * will be wrapped around the threads that relates these later loops
 * to the thread indices and then projects them out.
 */
static __isl_give isl_map *compute_privatization(struct cuda_gen *gen)
{
    isl_space *dim;
    isl_map *priv;
    isl_map *tiling;
    isl_map *proj;
    isl_set *par;

    dim = isl_union_map_get_space(gen->shared_sched);

    if (gen->options->wrap)
        tiling = wrap(isl_space_copy(dim), gen->shared_len + gen->n_block,
                gen->shared_len, gen->n_block, gen->block_dim);
    else
        tiling = tile(isl_space_copy(dim), gen->shared_len + gen->n_block,
                gen->shared_len, gen->n_block, gen->block_dim);

    priv = tiling;

    par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
        gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
        gen->n_block, "t");

    priv = isl_map_align_params(priv, isl_set_get_space(par));
    priv = isl_map_intersect_range(priv, par);

    dim = isl_map_get_space(priv);
    dim = isl_space_drop_dims(dim, isl_dim_in, 0, isl_space_dim(dim, isl_dim_in));
    dim = isl_space_drop_dims(dim, isl_dim_out, 0, isl_space_dim(dim, isl_dim_out));
    proj = projection(dim, gen->shared_len + 2 * gen->n_block,
                gen->shared_len + gen->n_block);

    priv = isl_map_apply_range(priv, proj);

    return priv;
}
/* Construct a map from domain_dim to domain_dim that increments
 * the dimension at position "pos" and leaves all other dimensions
 * unchanged.
 */
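/* For example (illustrative), for a two-dimensional domain and pos = 1,
 * the constructed map is { [i0, i1] -> [i0, i1 + 1] }.
 */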
static __isl_give isl_map *next(__isl_take isl_space *domain_dim, int pos)
{
    int i;
    int len = isl_space_dim(domain_dim, isl_dim_set);
    isl_space *dim;
    isl_basic_map *next;
    isl_local_space *ls;

    dim = isl_space_map_from_set(domain_dim);
    next = isl_basic_map_universe(isl_space_copy(dim));
    ls = isl_local_space_from_space(dim);

    for (i = 0; i < len; ++i) {
        isl_constraint *c;

        c = isl_equality_alloc(isl_local_space_copy(ls));
        isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
        isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
        if (i == pos)
            isl_constraint_set_constant_si(c, 1);
        next = isl_basic_map_add_constraint(next, c);
    }

    isl_local_space_free(ls);

    return isl_map_from_basic_map(next);
}
/* Check if the given access is coalesced.
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * This function is only called for access relations without reuse.
 */
static int access_is_coalesced(struct cuda_gen *gen,
    __isl_keep isl_union_map *access)
{
    isl_space *dim;
    isl_map *access_map;
    isl_map *next_thread_x;
    isl_map *next_element;
    isl_map *map;
    int coalesced;

    access = isl_union_map_copy(access);
    access = isl_union_map_apply_domain(access,
                isl_union_map_copy(gen->tiled_sched));
    access_map = isl_map_from_union_map(access);

    dim = isl_map_get_space(access_map);
    dim = isl_space_domain(dim);
    next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);

    dim = isl_map_get_space(access_map);
    dim = isl_space_range(dim);
    next_element = next(dim, isl_space_dim(dim, isl_dim_set) - 1);

    map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
    map = isl_map_apply_range(map, access_map);

    coalesced = isl_map_is_subset(map, next_element);

    isl_map_free(next_element);
    isl_map_free(map);

    return coalesced;
}
/* For the given array reference group, check whether the access is private
 * to the thread.  That is, check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation that maps the shared tile loop iterators
 * and the shared point loop iterators that will be wrapped over the
 * threads to the array elements.
 * We actually check that those iterators that will be wrapped
 * partition the array space.  This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * We also check that the index expression only depends on parallel
 * loops.  That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If it turns out we can use registers, we compute the private memory
 * tile size using can_tile_for_shared_memory, after introducing a dependence
 * on the thread indices.
 *
 * Before performing any of the above computations, we first check
 * if there is any reuse on the reference group.  If not, we simply
 * return.  If, moreover, the access is coalesced then we also remove
 * the shared memory tiling since we should just use global memory instead.
 */
static void check_private_group_access(struct cuda_gen *gen,
    struct cuda_array_ref_group *group)
{
    isl_map *acc;
    isl_union_map *access;
    int n_index = group->array->n_index;

    access = group_access_relation(group, 1, 1);
    if (isl_union_map_is_injective(access)) {
        if (group->shared_bound && access_is_coalesced(gen, access)) {
            free_bound_list(group->shared_bound, n_index);
            group->shared_bound = NULL;
        }
        isl_union_map_free(access);
        return;
    }
    access = isl_union_map_apply_domain(access,
                isl_union_map_copy(gen->shared_sched));

    acc = isl_map_from_union_map(access);

    if (!isl_map_is_bijective(acc)) {
        isl_map_free(acc);
        return;
    }

    group->private_bound = create_bound_list(gen->ctx, n_index);
    acc = isl_map_align_params(acc, isl_map_get_space(gen->privatization));
    acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
    if (!can_tile_for_shared_memory(gen, group->array, acc,
                    group->private_bound)) {
        free_bound_list(group->private_bound, n_index);
        group->private_bound = NULL;
    }

    isl_map_free(acc);
}
/* Look for the last shared tile loop that affects the offset of the
 * shared or private tile and store the result in group->last_shared.
 */
static void set_last_shared(struct cuda_gen *gen,
    struct cuda_array_ref_group *group)
{
    int i, j;
    struct cuda_array_bound *bounds;
    unsigned first_shared = gen->first_shared;
    int n_index = group->array->n_index;

    bounds = group->private_bound;
    if (!bounds)
        bounds = group->shared_bound;
    if (!bounds)
        return;

    for (j = gen->shared_len - 1; j >= 0; --j) {
        for (i = 0; i < n_index; ++i) {
            isl_aff *lb;
            isl_aff *shift;

            lb = bounds[i].lb;
            if (isl_aff_involves_dims(lb, isl_dim_param,
                        first_shared + j, 1))
                break;

            shift = bounds[i].shift;
            if (!shift)
                continue;
            if (isl_aff_involves_dims(shift, isl_dim_param,
                        first_shared + j, 1))
                break;
        }
        if (i < n_index)
            break;
    }

    group->last_shared = j;
}
/* Compute the sizes of all private arrays for the current kernel,
 * as well as the offsets of the private pieces in the original arrays.
 * If we cannot or don't want to privatize a given array group,
 * we use the shared memory tile sizes computed in
 * compute_group_shared_bound instead.
 *
 * If we have been able to find a private or shared tile,
 * we also look for the last shared tile loop that affects the offset
 * (and therefore the group tile) and store the result in group->last_shared.
 *
 * A privatized copy of all access relations from reference groups that
 * are mapped to private memory is stored in gen->private_access.
 */
static void compute_private_size(struct cuda_gen *gen)
{
    int i, j;
    isl_union_map *private;

    if (!gen->options->use_private_memory)
        return;

    private = isl_union_map_empty(isl_union_map_get_space(gen->shared_sched));

    for (i = 0; i < gen->n_array; ++i) {
        struct cuda_array_info *array = &gen->array[i];

        for (j = 0; j < array->n_group; ++j) {
            check_private_group_access(gen, array->groups[j]);

            if (!array->groups[j]->private_bound)
                continue;

            private = isl_union_map_union(private,
                group_access_relation(array->groups[j], 1, 1));
        }

        for (j = 0; j < array->n_group; ++j) {
            array->groups[j]->last_shared = gen->shared_len - 1;
            array->groups[j]->print_shared_level = -1;
            set_last_shared(gen, array->groups[j]);
        }
    }

    if (isl_union_map_is_empty(private))
        isl_union_map_free(private);
    else {
        isl_union_map *priv;

        private = isl_union_map_apply_domain(private,
                isl_union_map_copy(gen->shared_sched));
        priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
        private = isl_union_map_apply_domain(private, priv);
        gen->private_access = private;
    }
}
/* Compute the size of the tile specified by the list "bound" of n_index
 * cuda_array_bounds in number of elements and put the result in *size.
 */
static void tile_size(unsigned n_index, struct cuda_array_bound *bound,
    isl_int *size)
{
    int i;

    isl_int_set_si(*size, 1);

    for (i = 0; i < n_index; ++i)
        isl_int_mul(*size, *size, bound[i].size);
}
/* If max_shared_memory is not set to infinity (-1), then make
 * sure that the total amount of shared memory required by the
 * array reference groups mapped to shared memory is no larger
 * than this maximum.
 *
 * We apply a greedy approach and discard (keep in global memory)
 * those groups that would result in a total memory size that
 * is larger than the maximum.
 */
static void check_shared_memory_bound(struct cuda_gen *gen)
{
    int i, j;
    isl_int left, size;

    if (gen->options->max_shared_memory < 0)
        return;

    isl_int_init(left);
    isl_int_init(size);
    isl_int_set_si(left, gen->options->max_shared_memory);

    for (i = 0; i < gen->n_array; ++i) {
        struct cuda_array_info *array = &gen->array[i];

        for (j = 0; j < array->n_group; ++j) {
            struct cuda_array_ref_group *group;

            group = array->groups[j];
            if (!group->shared_bound)
                continue;

            tile_size(array->n_index, group->shared_bound, &size);
            isl_int_mul_ui(size, size, array->size);

            if (isl_int_le(size, left)) {
                isl_int_sub(left, left, size);
                continue;
            }

            free_bound_list(group->shared_bound, array->n_index);
            group->shared_bound = NULL;
        }
    }

    isl_int_clear(size);
    isl_int_clear(left);
}
/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel.
 */
static int populate_array_references(struct cuda_gen *gen,
    struct cuda_array_info *array, __isl_keep isl_union_map *sched,
    struct cuda_array_ref_group **groups)
{
    int i;
    int n;
    isl_ctx *ctx = isl_union_map_get_ctx(sched);

    n = 0;
    for (i = 0; i < array->n_ref; ++i) {
        isl_union_map *umap;
        isl_map *map;
        struct cuda_array_ref_group *group;
        struct cuda_stmt_access *access = array->refs[i];

        map = isl_map_copy(access->access);
        umap = isl_union_map_from_map(map);
        umap = isl_union_map_apply_domain(umap,
                isl_union_map_copy(sched));

        if (isl_union_map_is_empty(umap)) {
            isl_union_map_free(umap);
            continue;
        }

        map = isl_map_from_union_map(umap);
        map = isl_map_detect_equalities(map);

        group = isl_calloc_type(ctx, struct cuda_array_ref_group);
        assert(group);
        group->array = array;
        group->access = map;
        group->write = access->write;
        group->refs = &array->refs[i];

        groups[n++] = group;
    }

    return n;
}

static void free_array_ref_group(struct cuda_array_ref_group *group,
    int n_index)
{
    if (!group)
        return;
    free_bound_list(group->shared_bound, n_index);
    free_bound_list(group->private_bound, n_index);
    isl_map_free(group->access);
    free(group->refs);
    free(group);
}
/* Given a set where the parameters gen->first_shared up to
 * gen->first_shared + gen->shared_len represent the tile loops,
 * eliminate the innermost of those that have a fixed value
 * until we reach one that does not (obviously) have a fixed value.
 */
static __isl_give isl_set *eliminate_fixed_inner_loops(struct cuda_gen *gen,
    __isl_take isl_set *access)
{
    int i;

    for (i = gen->shared_len - 1; i >= 0; --i) {
        int pos = gen->first_shared + i;
        if (!isl_set_plain_is_fixed(access, isl_dim_param, pos, NULL))
            break;
        access = isl_set_eliminate(access, isl_dim_param, pos, 1);
    }
    return access;
}

/* Check if the accessed sets of group1 and group2 overlap within
 * the innermost loop.  In particular, ignore any inner dimension
 * with a fixed value.
 * The copying to and from shared memory will be performed within
 * the innermost actual loop so we are only allowed to consider
 * the dimensions up to that innermost loop while checking whether
 * two access sets overlap.
 */
static int accesses_overlap(struct cuda_gen *gen,
    struct cuda_array_ref_group *group1,
    struct cuda_array_ref_group *group2)
{
    int empty;
    isl_set *access1, *access2;

    access1 = isl_map_range(isl_map_copy(group1->access));
    access1 = eliminate_fixed_inner_loops(gen, access1);
    access2 = isl_map_range(isl_map_copy(group2->access));
    access2 = eliminate_fixed_inner_loops(gen, access2);
    access1 = isl_set_intersect(access1, access2);
    empty = isl_set_is_empty(access1);
    isl_set_free(access1);

    return !empty;
}
/* If two groups have overlapping access relations (within the innermost
 * loop) and if one of them involves a write, then merge the two groups
 * into one.
 *
 * We keep track of the grouping in "leader".  leader[j] points to
 * an earlier group array element that belongs to the same group,
 * or the array element j itself if this element is the first in the group.
 *
 * Return the number of group leaders.
 */
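/* For example (illustrative), leader = { 0, 0, 2 } expresses that
 * elements 0 and 1 form one group, led by element 0, and that
 * element 2 forms a group of its own.
 */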
static int group_overlapping_writes(struct cuda_gen *gen, int n,
    struct cuda_array_ref_group **groups, int *leader)
{
    int i, j;
    int n_group = n;

    for (i = 0; i < n; ++i) {
        int l = i;
        groups[l]->n_ref = 1;
        for (j = i - 1; j >= 0; --j) {
            if (leader[j] != j)
                continue;
            if (!groups[l]->write && !groups[j]->write)
                continue;

            if (!accesses_overlap(gen, groups[l], groups[j]))
                continue;

            groups[j]->access = isl_map_union(groups[j]->access,
                        groups[l]->access);
            groups[j]->write = 1;
            groups[l]->access = NULL;
            groups[j]->n_ref += groups[l]->n_ref;
            l = leader[l] = j;
            n_group--;
        }
        leader[i] = l;
    }

    return n_group;
}
/* Compute the size of the shared array corresponding to the given
 * array reference group, based on the accesses from the current kernel,
 * as well as the offset of the shared piece in the original array.
 */
static void compute_group_shared_bound(struct cuda_gen *gen,
    struct cuda_array_info *array, struct cuda_array_ref_group *group)
{
    isl_ctx *ctx = isl_space_get_ctx(array->dim);

    if (!gen->options->use_shared_memory)
        return;
    if (cuda_array_is_read_only_scalar(array))
        return;

    group->shared_bound = create_bound_list(ctx, array->n_index);
    if (!can_tile_for_shared_memory(gen, array, group->access,
                    group->shared_bound)) {
        free_bound_list(group->shared_bound, array->n_index);
        group->shared_bound = NULL;
    }
}
/* Is the size of the tile specified by "bound" smaller than the sum of
 * the sizes of the tiles specified by "bound1" and "bound2"?
 */
static int smaller_tile(unsigned n_index, struct cuda_array_bound *bound,
    struct cuda_array_bound *bound1, struct cuda_array_bound *bound2)
{
    int smaller;
    isl_int size, size1, size2;

    isl_int_init(size);
    isl_int_init(size1);
    isl_int_init(size2);

    tile_size(n_index, bound, &size);
    tile_size(n_index, bound1, &size1);
    tile_size(n_index, bound2, &size2);

    isl_int_sub(size, size, size1);
    isl_int_sub(size, size, size2);
    smaller = isl_int_is_neg(size);

    isl_int_clear(size2);
    isl_int_clear(size1);
    isl_int_clear(size);

    return smaller;
}
/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile, the merged group also has
 * a shared memory tile and the size of the tile for the merged group
 * is smaller than the sum of the tile sizes of the individual groups.
 *
 * Return the number of group leaders after merging.
 */
static int group_common_shared_memory_tile(struct cuda_gen *gen,
    struct cuda_array_info *array, int n,
    struct cuda_array_ref_group **groups, int *leader, int n_group)
{
    int i, j;
    isl_ctx *ctx = isl_space_get_ctx(array->dim);

    for (i = 0; n_group > 1 && i < n; ++i) {
        int l = i;
        if (leader[i] != i)
            continue;
        if (!groups[i]->shared_bound)
            continue;
        for (j = i - 1; j >= 0; --j) {
            isl_map *map;
            int empty;
            struct cuda_array_bound *shared_bound;

            if (leader[j] != j)
                continue;
            if (!groups[j]->shared_bound)
                continue;

            map = isl_map_intersect(isl_map_copy(groups[l]->access),
                    isl_map_copy(groups[j]->access));
            empty = isl_map_is_empty(map);
            isl_map_free(map);

            if (empty)
                continue;

            map = isl_map_union(isl_map_copy(groups[l]->access),
                    isl_map_copy(groups[j]->access));
            shared_bound = create_bound_list(ctx, array->n_index);
            if (!can_tile_for_shared_memory(gen, array, map,
                            shared_bound) ||
                !smaller_tile(array->n_index, shared_bound,
                        groups[l]->shared_bound,
                        groups[j]->shared_bound)) {
                isl_map_free(map);
                free_bound_list(shared_bound, array->n_index);
                continue;
            }

            free_bound_list(groups[j]->shared_bound,
                    array->n_index);
            groups[j]->shared_bound = shared_bound;
            isl_map_free(groups[j]->access);
            groups[j]->access = map;
            groups[j]->n_ref += groups[l]->n_ref;
            l = leader[l] = j;
            n_group--;
        }
    }

    return n_group;
}
/* Extract an array of array reference groups from the array of references
 * and the grouping information in "leader".
 *
 * Store the results in array->n_group and array->groups.
 */
static void extract_array_groups(isl_ctx *ctx, struct cuda_array_info *array,
    int n, struct cuda_array_ref_group **groups, int *leader, int n_group)
{
    int i, j;

    for (i = 2; i < n; ++i)
        leader[i] = leader[leader[i]];

    array->n_group = n_group;
    array->groups = isl_alloc_array(ctx, struct cuda_array_ref_group *,
                    n_group);
    assert(array->groups);

    j = 0;
    for (i = 0; i < n; ++i) {
        int k, l;
        struct cuda_stmt_access **refs;

        if (leader[i] != i) {
            groups[i]->refs = NULL;
            free_array_ref_group(groups[i], array->n_index);
            continue;
        }

        refs = isl_alloc_array(ctx, struct cuda_stmt_access *,
                    groups[i]->n_ref);
        assert(refs);
        l = 0;
        for (k = i; k < n; ++k)
            if (leader[k] == i) {
                refs[l++] = *groups[k]->refs;
                (*groups[k]->refs)->group = j;
            }

        groups[i]->refs = refs;
        groups[i]->nr = j;
        array->groups[j++] = groups[i];
    }
}
/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 * the two groups.
 *
 * During the construction the group->refs field points to a single
 * array reference inside the array of array references, while
 * group->n_ref contains the number of elements in leader that
 * (directly or indirectly) point to this group, provided the group
 * is a leader.
 */
static void group_array_references(struct cuda_gen *gen,
    struct cuda_array_info *array, __isl_keep isl_union_map *sched)
{
    int i;
    int n, n_group;
    isl_ctx *ctx = isl_union_map_get_ctx(sched);
    struct cuda_array_ref_group **groups;
    int *leader;

    groups = isl_calloc_array(ctx, struct cuda_array_ref_group *,
                    array->n_ref);
    assert(groups);

    n = populate_array_references(gen, array, sched, groups);

    leader = isl_alloc_array(ctx, int, n);
    assert(leader);

    n_group = group_overlapping_writes(gen, n, groups, leader);

    for (i = 0; i < n; ++i)
        if (leader[i] == i)
            compute_group_shared_bound(gen, array, groups[i]);

    n_group = group_common_shared_memory_tile(gen, array, n, groups,
                        leader, n_group);

    extract_array_groups(ctx, array, n, groups, leader, n_group);

    free(leader);
    free(groups);
}
/* Take tiled_sched, project it onto the shared tile loops and
 * the loops that will be wrapped over the threads,
 * parametrize the shared tile loops and store the result in gen->shared_sched.
 * The position of the first of these parameters is stored in gen->first_shared.
 * Also compute a projection that projects out the loops that will be
 * wrapped over the threads and store this projection in gen->shared_proj.
 */
static void compute_shared_sched(struct cuda_gen *gen)
{
    isl_space *dim;
    isl_map *proj;
    isl_set *par;
    isl_union_map *sched;

    sched = isl_union_map_copy(gen->tiled_sched);

    dim = isl_union_map_get_space(sched);
    gen->first_shared = isl_space_dim(dim, isl_dim_param);
    proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
    sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));

    dim = isl_union_map_get_space(sched);
    par = parametrization(dim, gen->shared_len + gen->n_block,
                0, gen->shared_len, "g");
    sched = isl_union_map_intersect_range(sched,
                isl_union_set_from_set(par));

    dim = isl_union_map_get_space(sched);
    proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);

    gen->shared_sched = sched;
    gen->shared_proj = isl_union_map_from_map(proj);
}
/* Group references of all arrays in the program.
 */
static void group_references(struct cuda_gen *gen)
{
    int i;
    isl_union_map *sched;

    sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
                isl_union_map_copy(gen->shared_proj));

    for (i = 0; i < gen->n_array; ++i)
        group_array_references(gen, &gen->array[i], sched);

    isl_union_map_free(sched);
}
/* Free all array information that is local to the current kernel.
 */
static void free_local_array_info(struct cuda_gen *gen)
{
    int i, j;

    for (i = 0; i < gen->n_array; ++i) {
        struct cuda_array_info *array = &gen->array[i];

        for (j = 0; j < array->n_group; ++j)
            free_array_ref_group(array->groups[j], array->n_index);
        free(array->groups);

        if (array->n_group == 0)
            continue;
        for (j = 0; j < gen->array[i].n_index; ++j) {
            isl_pw_aff_free(gen->array[i].local_bound[j]);
            gen->array[i].local_bound[j] = NULL;
        }
    }
}
/* The sizes of the arrays on the host that have been computed by
 * extract_array_info may depend on the parameters.  Use the extra
 * constraints on the parameters that are valid at "host_domain"
 * to simplify these expressions.
 */
static void localize_bounds(struct cuda_gen *gen,
    __isl_keep isl_set *host_domain)
{
    int i, j;
    isl_set *context;

    context = isl_set_copy(host_domain);
    context = isl_set_params(context);

    for (i = 0; i < gen->n_array; ++i) {
        struct cuda_array_info *array = &gen->array[i];

        if (array->n_group == 0)
            continue;

        for (j = 0; j < array->n_index; ++j) {
            isl_pw_aff *pwaff;

            pwaff = isl_pw_aff_copy(array->bound[j]);
            pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context));
            array->local_bound[j] = pwaff;
        }
    }
    isl_set_free(context);
}
/* Set gen->tile_len and gen->n_parallel to those of the first statement
 * in the statement list u.
 * Because of the way the schedule is constructed, the other statements
 * in the list, if any, should have the same values for these properties.
 */
static void set_tile_len(struct cuda_gen *gen, struct clast_user_stmt *u)
{
    int nr;
    struct cuda_stmt *stmt;

    nr = atoi(u->statement->name + 2);
    stmt = &gen->stmts[nr];

    gen->tile_len = stmt->tile_len;
    gen->n_parallel = stmt->n_parallel;
}
/* Extract a description of the grid, i.e., the possible values
 * of the block ids, from gen->tiled_sched.
 * The block ids are parameters in gen->tiled_sched.
 * We simply need to change them into set dimensions.
 */
static __isl_give isl_set *extract_grid(struct cuda_gen *gen)
{
    int i;
    isl_set *grid;

    grid = isl_union_map_params(isl_union_map_copy(gen->tiled_sched));
    grid = isl_set_from_params(grid);
    grid = isl_set_add_dims(grid, isl_dim_set, gen->n_grid);
    for (i = 0; i < gen->n_grid; ++i) {
        int pos;
        char name[20];

        snprintf(name, sizeof(name), "b%d", i);
        pos = isl_set_find_dim_by_name(grid, isl_dim_param, name);
        assert(pos >= 0);
        grid = isl_set_equate(grid, isl_dim_param, pos, isl_dim_set, i);
        grid = isl_set_project_out(grid, isl_dim_param, pos, 1);
    }

    return grid;
}
/* Print the effective grid size as a list of the sizes in each
 * dimension, from innermost to outermost.
 *
 * The grid size specified by the user or set by default
 * in read_grid_sizes() and applied in tile_schedule(),
 * may be too large for the given code in the sense that
 * it may contain blocks that don't need to execute anything.
 * We therefore don't print this grid size, but instead the
 * smallest grid size that ensures that all blocks that actually
 * execute code are included in the grid.
 *
 * For each block dimension, we compute the maximal value of the block id
 * and add one.
 */
static void print_grid_size(struct cuda_gen *gen, __isl_take isl_set *context)
{
    int i;
    isl_printer *prn;
    isl_set *grid;

    if (gen->n_grid == 0) {
        isl_set_free(context);
        return;
    }

    grid = extract_grid(gen);

    prn = isl_printer_to_file(gen->ctx, gen->cuda.host_c);
    prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);

    prn = isl_printer_print_str(prn, "(");
    for (i = gen->n_grid - 1; i >= 0; --i) {
        isl_space *space;
        isl_aff *one;
        isl_pw_aff *bound = isl_set_dim_max(isl_set_copy(grid), i);

        bound = isl_pw_aff_coalesce(bound);
        bound = isl_pw_aff_gist(bound, isl_set_copy(context));

        space = isl_pw_aff_get_domain_space(bound);
        one = isl_aff_zero_on_domain(isl_local_space_from_space(space));
        one = isl_aff_add_constant_si(one, 1);
        bound = isl_pw_aff_add(bound, isl_pw_aff_from_aff(one));
        prn = isl_printer_print_pw_aff(prn, bound);
        isl_pw_aff_free(bound);

        if (i)
            prn = isl_printer_print_str(prn, ", ");
    }
    prn = isl_printer_print_str(prn, ")");

    isl_printer_free(prn);
    isl_set_free(grid);
    isl_set_free(context);
}
/* This function is called for each leaf in the clast of the host code.
 * We first specialize the schedule to the site of the leaf, compute
 * the size of shared memory and then print the body of host code
 * and the associated kernel (through a call to print_kernel_body).
 */
static void print_host_user(struct clast_printer_info *code,
    struct clast_user_stmt *u)
{
    struct cuda_gen *gen = code->user;
    isl_space *dim;
    isl_set *par;
    isl_set *host_domain;
    isl_union_map *access;
    isl_union_map *local_sched;
    isl_union_set *arrays;

    set_tile_len(gen, u);

    host_domain = extract_entire_host_domain(&u->stmt);

    local_sched = isl_union_map_intersect_range(
            isl_union_map_copy(gen->sched),
            isl_union_set_from_set(extend(isl_set_copy(host_domain),
                        gen->untiled_len)));
    access = isl_union_map_union(isl_union_map_copy(gen->read),
                isl_union_map_copy(gen->write));
    access = isl_union_map_apply_domain(access,
                isl_union_map_copy(local_sched));
    arrays = isl_union_map_range(access);

    print_indent(code->dst, code->indent);
    fprintf(code->dst, "dim3 k%d_dimBlock", gen->kernel_id);
    print_reverse_list(code->dst, gen->n_block, gen->block_dim);
    fprintf(code->dst, ";\n");

    gen->tiled_sched = tile_schedule(gen, local_sched);
    gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
    gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);

    print_indent(code->dst, code->indent);
    fprintf(code->dst, "dim3 k%d_dimGrid", gen->kernel_id);
    print_grid_size(gen, isl_set_params(isl_set_copy(host_domain)));
    fprintf(code->dst, ";\n");

    gen->local_sched = isl_union_map_copy(gen->tiled_sched);

    dim = isl_union_map_get_space(gen->local_sched);
    par = parametrization(dim, gen->tiled_len, 0, gen->shared_len, "g");
    gen->local_sched = isl_union_map_intersect_range(gen->local_sched,
                isl_union_set_from_set(par));

    gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
    gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);

    gen->private_access = NULL;
    compute_shared_sched(gen);
    gen->privatization = compute_privatization(gen);
    group_references(gen);
    compute_private_size(gen);
    check_shared_memory_bound(gen);
    localize_bounds(gen, host_domain);

    gen->local_sched = interchange_for_unroll(gen, gen->local_sched);

    print_kernel_launch(gen, arrays);

    fprintf(gen->cuda.kernel_c, "{\n");

    print_kernel_body(gen, host_domain, gen->tiled_sched);

    fprintf(gen->cuda.kernel_c, "}\n");

    free_local_array_info(gen);
    isl_map_free(gen->privatization);
    isl_union_map_free(gen->private_access);
    isl_union_map_free(gen->local_sched);
    isl_union_map_free(gen->tiled_sched);
    isl_union_map_free(gen->shared_sched);
    isl_union_map_free(gen->shared_proj);
    isl_union_set_free(arrays);
    isl_set_free(host_domain);

    free(gen->tile_size);
}
/* Use CLooG to generate code for the outer gen->tile_first loops
 * of the global schedule in gen->sched.
 * The pretty printing of this code is handled by print_clast,
 * which calls print_host_user for each kernel invocation location.
 */
static void print_cloog_host_code(struct cuda_gen *gen)
{
    int i;
    isl_set *context;
    isl_union_map *sched;
    CloogOptions *options;
    CloogDomain *cloog_context;
    CloogUnionDomain *ud;
    CloogInput *input;
    struct clast_stmt *stmt;
    char name[20];

    options = cloog_options_malloc(gen->state);
    options->language = CLOOG_LANGUAGE_C;
    options->strides = 1;
    options->stop = gen->tile_first;
    options->f = gen->untiled_len;
    options->l = gen->untiled_len;
    options->save_domains = 1;
    options->noscalars = 1;

    sched = isl_union_map_copy(gen->sched);
    ud = cloog_union_domain_from_isl_union_map(sched);
    for (i = 0; i < options->stop; ++i) {
        snprintf(name, sizeof(name), "h%d", i);
        ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
    }
    context = isl_set_copy(gen->context);
    cloog_context = cloog_domain_from_isl_set(context);
    input = cloog_input_alloc(cloog_context, ud);

    stmt = cloog_clast_create_from_input(input, options);

    gen->code.indent = 0;
    gen->code.dst = gen->cuda.host_c;
    gen->code.print_user_stmt = NULL;
    gen->code.print_user_stmt_list = &print_host_user;
    gen->code.print_for_head = NULL;
    gen->code.print_for_foot = NULL;
    gen->code.user = gen;
    print_clast(&gen->code, stmt);

    cloog_clast_free(stmt);
    cloog_options_free(options);
    fprintf(gen->cuda.host_c, "\n");
}
void print_cuda_macros(struct cuda_gen *gen)
{
    const char *macros =
        "#define cudaCheckReturn(ret) assert((ret) == cudaSuccess)\n"
        "#define cudaCheckKernel()"
        " assert(cudaGetLastError() == cudaSuccess)\n\n";
    fputs(macros, gen->cuda.host_c);
}
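/* Print the host code: the CUDA error-checking macros, the declaration,
 * allocation and copying of the device arrays, the CLooG-generated host
 * loops with the embedded kernel launches, and finally the copying back
 * and freeing of the device arrays.
 */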
void print_host_code(struct cuda_gen *gen)
{
    fprintf(gen->cuda.host_c, "{\n");
    print_cloog_macros(gen->cuda.host_c);
    print_cloog_macros(gen->cuda.kernel_c);

    print_cuda_macros(gen);

    declare_device_arrays(gen);

    allocate_device_arrays(gen);
    copy_arrays_to_device(gen);

    print_cloog_host_code(gen);

    copy_arrays_from_device(gen);
    free_device_arrays(gen);

    fprintf(gen->cuda.host_c, "}\n");
}
__isl_give isl_set *add_context_from_str(__isl_take isl_set *set,
    const char *str)
{
    isl_ctx *ctx;
    isl_set *context;

    ctx = isl_set_get_ctx(set);
    context = isl_set_read_from_str(ctx, str);
    context = isl_set_align_params(context, isl_set_get_space(set));
    set = isl_set_intersect(set, context);

    return set;
}
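/* Construct an isl_union_map from the string description in "str".
 */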
__isl_give isl_union_map *extract_sizes_from_str(isl_ctx *ctx, const char *str)
{
    return isl_union_map_read_from_str(ctx, str);
}
/* Return the union of all iteration domains of the gen->stmts[i].
 */
static __isl_give isl_union_set *extract_domain(struct cuda_gen *gen)
{
    int i;
    isl_union_set *domain;

    domain = isl_union_set_empty(isl_set_get_space(gen->context));
    for (i = 0; i < gen->n_stmts; ++i) {
        isl_set *domain_i;

        domain_i = isl_set_copy(gen->stmts[i].domain);
        domain = isl_union_set_union(domain,
                isl_union_set_from_set(domain_i));
    }

    return domain;
}

/* Information about the outermost tilable bands in the forest of bands.
 *
 * tile_len and n_parallel are only set on band_info structures
 * that correspond to outermost bands. For other bands (in particular,
 * ancestors of the outermost bands), n_parallel is set to 0.
 *
 * prefix is the (padded) schedule leading up to the outermost tilable bands.
 *
 * tile_first is the number of schedule dimensions in prefix.
 *
 * suffix is the schedule of the outermost tilable bands and their descendants.
 */
struct band_info {
	struct cuda_gen *gen;
	int tile_first;
	int tile_len;
	int n_parallel;
	isl_union_map *prefix;
	isl_union_map *suffix;
};

/* Set tile_len and n_parallel of the statement to that of
 * their outermost band, recorded in the band_info.
 */
static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
{
	int nr;
	struct band_info *info = user;
	struct cuda_stmt *stmt;

	nr = atoi(isl_map_get_tuple_name(map, isl_dim_in) + 2);
	stmt = &info->gen->stmts[nr];

	stmt->tile_len = info->tile_len;
	stmt->n_parallel = info->n_parallel;

	isl_map_free(map);

	return 0;
}
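
/* Note that the "+ 2" above skips a two-character statement name prefix,
 * so that, e.g., an input tuple called "S_3" selects gen->stmts[3].
 */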

static void list_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band_list *list, int pos, struct band_info *list_info);

/* Check if this band has any parallel loops. If so, take it as
 * the outermost tilable band. If not, continue looking for the
 * outermost tilable band in the children of the current band.
 */
static void band_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band *band, int pos, struct band_info *info)
{
	int n = isl_band_n_member(band);
	int n_parallel;

	for (n_parallel = 0; n_parallel < n; ++n_parallel)
		if (!isl_band_member_is_zero_distance(band, n_parallel))
			break;

	info->n_parallel = n_parallel;
	if (n_parallel) {
		info->gen = gen;
		info->tile_first = pos;
		info->tile_len = n;
		info->prefix = isl_band_get_prefix_schedule(band);
		info->suffix = isl_union_map_flat_range_product(
				isl_band_get_partial_schedule(band),
				isl_band_get_suffix_schedule(band));
		isl_union_map_foreach_map(info->prefix,
						&set_stmt_tile_len, info);
	} else if (isl_band_has_children(band)) {
		isl_band_list *children;

		children = isl_band_get_children(band);
		list_select_outer_band(gen, children, pos + n, info);
	} else {
		info->gen = gen;
		info->tile_first = pos + n;
		info->tile_len = 0;
		info->prefix = isl_union_map_flat_range_product(
				isl_band_get_prefix_schedule(band),
				isl_band_get_partial_schedule(band));
		info->suffix = isl_band_get_suffix_schedule(band);
		isl_union_map_foreach_map(info->prefix,
						&set_stmt_tile_len, info);
	}

	isl_band_free(band);
}
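
/* The loop above counts the leading zero-distance (parallel) members of
 * the band.  For a band of three members of which only the first two carry
 * no dependence distance, n_parallel ends up as 2 and the band is accepted
 * as the outermost tilable band.
 */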

/* Comparison function that returns a non-zero value for band_infos
 * with different tile_len fields or different n_parallel fields.
 */
static int cmp_band(const void *p1, const void *p2)
{
	const struct band_info *info1 = p1;
	const struct band_info *info2 = p2;

	if (info1->tile_len != info2->tile_len)
		return info1->tile_len - info2->tile_len;

	return info1->n_parallel - info2->n_parallel;
}

/* Extend "umap" with coordinates with fixed value "val"
 * to a total length of "dst_len", assuming the original dimension is "src_len".
 */
static __isl_give isl_union_map *extend_range(__isl_take isl_union_map *umap,
	int src_len, int dst_len, int val)
{
	isl_space *dim;
	isl_map *map;
	int i;

	dim = isl_union_map_get_space(umap);
	map = isl_map_reverse(projection(dim, dst_len, src_len));
	for (i = src_len; i < dst_len; ++i)
		map = isl_map_fix_si(map, isl_dim_out, i, val);

	umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));

	return umap;
}
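
/* For example, applying extend_range to { S[i] -> [i] } with src_len = 1,
 * dst_len = 3 and val = 0 yields { S[i] -> [i, 0, 0] }.
 */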

/* Group bands with the same values for tile_len and n_parallel.
 * The prefix schedule is then extended with a fixed coordinate that
 * is different for each such group.
 * Note that the actual values for this coordinate are not important.
 * The bands have already been effectively separated at a higher level
 * or they are independent and may be executed in parallel.
 * The list of band_info has been sorted before this function is called.
 */
static void separate_bands(struct band_info *info, int n)
{
	int i;
	int j = 0;

	for (i = 0; i < n; ++i) {
		int l = info[i].tile_first;

		if (i &&
		    (info[i].tile_len != info[i - 1].tile_len ||
		     info[i].n_parallel != info[i - 1].n_parallel))
			j++;

		info[i].prefix = extend_range(info[i].prefix,
						l, l + 1, j);
		info[i].tile_first = l + 1;
	}
}
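
/* For instance, with three sorted bands of which the first two share the
 * same tile_len and n_parallel, the prefixes are extended with the constant
 * coordinates 0, 0 and 1 respectively, keeping the last band apart from the
 * first two in the combined schedule.
 */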

/* Select the outermost bands in the elements of the list, align
 * their prefix schedules, separate bands with different values
 * for tile_len and/or n_parallel and then combine the resulting
 * prefix and suffix schedules into a single pair of prefix and
 * suffix schedules for the entire list.
 */
static void list_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band_list *list, int pos, struct band_info *list_info)
{
	isl_band *band;
	int i;
	int n = isl_band_list_n_band(list);
	isl_ctx *ctx = isl_band_list_get_ctx(list);
	struct band_info *info;
	int max_tile_first;
	isl_union_map *prefix;
	isl_union_map *suffix;

	info = isl_calloc_array(ctx, struct band_info, n);
	assert(info);

	max_tile_first = 0;
	for (i = 0; i < n; ++i) {
		band = isl_band_list_get_band(list, i);
		band_select_outer_band(gen, band, pos, &info[i]);
		if (info[i].tile_first > max_tile_first)
			max_tile_first = info[i].tile_first;
	}

	for (i = 0; i < n; ++i) {
		if (info[i].tile_first == max_tile_first)
			continue;
		info[i].prefix = extend_range(info[i].prefix,
					info[i].tile_first, max_tile_first, 0);
		info[i].tile_first = max_tile_first;
	}

	qsort(info, n, sizeof(struct band_info), &cmp_band);

	for (i = 0; i < n - 1; ++i)
		if (info[i].tile_len != info[i + 1].tile_len ||
		    info[i].n_parallel != info[i + 1].n_parallel)
			break;

	if (i < n - 1)
		separate_bands(info, n);

	prefix = info[0].prefix;
	suffix = info[0].suffix;

	for (i = 1; i < n; ++i) {
		prefix = isl_union_map_union(prefix, info[i].prefix);
		suffix = isl_union_map_union(suffix, info[i].suffix);
	}

	list_info->tile_first = info[0].tile_first;
	list_info->tile_len = -1;
	list_info->prefix = prefix;
	list_info->suffix = suffix;

	isl_band_list_free(list);
	free(info);
}

/* Set max_out to the maximal number of output dimensions over
 * all maps.
 */
static int update_max_out(__isl_take isl_map *map, void *user)
{
	int *max_out = user;
	int n_out = isl_map_dim(map, isl_dim_out);

	if (n_out > *max_out)
		*max_out = n_out;

	isl_map_free(map);
	return 0;
}

struct align_range_data {
	int max_out;
	isl_union_map *res;
};

/* Extend the dimension of the range of the given map to data->max_out and
 * then add the result to data->res.
 */
static int map_align_range(__isl_take isl_map *map, void *user)
{
	struct align_range_data *data = user;
	int i;
	isl_space *dim;
	isl_map *proj;
	int n_out = isl_map_dim(map, isl_dim_out);

	dim = isl_union_map_get_space(data->res);
	proj = isl_map_reverse(projection(dim, data->max_out, n_out));
	for (i = n_out; i < data->max_out; ++i)
		proj = isl_map_fix_si(proj, isl_dim_out, i, 0);

	map = isl_map_apply_range(map, proj);

	data->res = isl_union_map_add_map(data->res, map);

	return 0;
}

/* Extend the ranges of the maps in the union map such that they all have
 * the same dimension.
 */
static __isl_give isl_union_map *align_range(__isl_take isl_union_map *umap)
{
	struct align_range_data data;

	data.max_out = 0;
	isl_union_map_foreach_map(umap, &update_max_out, &data.max_out);

	data.res = isl_union_map_empty(isl_union_map_get_space(umap));
	isl_union_map_foreach_map(umap, &map_align_range, &data);

	isl_union_map_free(umap);

	return data.res;
}
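
/* For example, a union map containing { S1[i] -> [i] } and
 * { S2[i] -> [i, i] } is turned into { S1[i] -> [i, 0]; S2[i] -> [i, i] },
 * so that all ranges have the maximal dimension (here 2).
 */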

/* Select the outermost tilable band that (by construction)
 * has at least one parallel loop.
 * The starting position of the aligned band is stored in gen->tile_first.
 * The sizes and number of parallel loops may be different in different
 * parts of the band forest and are therefore stored in the cuda_stmts.
 *
 * Return the complete schedule, with the tilable bands aligned
 * at gen->tile_first and padded with zero, if needed.
 */
static __isl_give isl_union_map *select_outer_tilable_band(struct cuda_gen *gen,
	__isl_keep isl_schedule *schedule)
{
	isl_band_list *list;
	struct band_info info;

	gen->n_parallel = 0;

	list = isl_schedule_get_band_forest(schedule);

	list_select_outer_band(gen, list, 0, &info);

	gen->tile_first = info.tile_first;
	info.suffix = align_range(info.suffix);

	return isl_union_map_flat_range_product(info.prefix, info.suffix);
}

/* Set gen->untiled_len to the number of scheduling dimensions
 * for the schedule of the first domain.
 * We assume here that this number is the same for all domains.
 */
static int set_untiled_len(__isl_take isl_map *map, void *user)
{
	unsigned *untiled_len = user;

	*untiled_len = isl_map_dim(map, isl_dim_out);

	isl_map_free(map);

	/* Stop after the first map. */
	return -1;
}

/* Compute an appropriate schedule based on the accesses in
 * gen->read and gen->write.
 *
 * We first compute dependences and then use those to compute
 * a schedule that has a parallel loop in each tilable band.
 * Finally, we select the outermost tilable band.
 */
static void compute_schedule(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_ctx *ctx = isl_union_map_get_ctx(sched);
	isl_union_set *domain;
	isl_union_map *empty;
	isl_union_map *dep_raw, *dep2, *dep3, *dep;
	isl_union_map *uninitialized;
	isl_schedule *schedule;

	empty = isl_union_map_empty(isl_union_map_get_space(sched));

	/* Flow (read-after-write) dependences and uninitialized reads. */
	isl_union_map_compute_flow(isl_union_map_copy(gen->read),
				isl_union_map_copy(gen->write), empty,
				isl_union_map_copy(sched),
				&dep_raw, NULL, &uninitialized, NULL);
	/* Write-after-write and write-after-read dependences. */
	isl_union_map_compute_flow(isl_union_map_copy(gen->write),
				isl_union_map_copy(gen->write),
				isl_union_map_copy(gen->read),
				isl_union_map_copy(sched),
				&dep2, &dep3, NULL, NULL);
	isl_union_map_free(sched);

	gen->copy_in = isl_union_map_range(uninitialized);

	dep = isl_union_map_union(dep2, dep3);
	dep = isl_union_map_union(dep, dep_raw);
	dep = isl_union_map_coalesce(dep);

	domain = extract_domain(gen);
	/* Ask the scheduler for outer zero-distance (parallel) band members,
	 * which band_select_outer_band checks for later on.
	 */
	isl_options_set_schedule_outer_zero_distance(ctx, 1);
	schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
				isl_union_map_copy(dep), dep);

	sched = select_outer_tilable_band(gen, schedule);

	isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
	sched = isl_union_map_intersect_domain(sched, domain);
	gen->sched = sched;

	isl_schedule_free(schedule);
}

/* Extract a cuda_stmt_access from "expr", append it to the list
 * that ends in *next_access and return the updated end of the list.
 */
static struct cuda_stmt_access **expr_extract_access(struct pet_expr *expr,
	struct cuda_stmt_access **next_access)
{
	struct cuda_stmt_access *access;
	isl_ctx *ctx = isl_map_get_ctx(expr->acc.access);

	access = isl_alloc_type(ctx, struct cuda_stmt_access);
	assert(access);
	access->next = NULL;
	access->read = expr->acc.read;
	access->write = expr->acc.write;
	access->access = isl_map_copy(expr->acc.access);

	*next_access = access;
	next_access = &(*next_access)->next;

	return next_access;
}

static struct cuda_stmt_access **expr_extract_accesses(struct pet_expr *expr,
	struct cuda_stmt_access **next_access)
{
	int i;

	for (i = 0; i < expr->n_arg; ++i)
		next_access = expr_extract_accesses(expr->args[i],
							next_access);

	if (expr->type == pet_expr_access)
		next_access = expr_extract_access(expr, next_access);

	return next_access;
}
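
/* The arguments are traversed before the expression itself, so for a nested
 * access such as A[B[i]] the access to B is appended to the list before the
 * access to A.
 */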

/* Collect all accesses in the body of "stmt" into stmt->accesses.
 */
static void pet_stmt_extract_accesses(struct cuda_stmt *stmt)
{
	struct cuda_stmt_access **next_access = &stmt->accesses;

	stmt->accesses = NULL;
	expr_extract_accesses(stmt->body, next_access);
}

/* Return an array of cuda_stmt representing the statements in "scop".
 */
static struct cuda_stmt *extract_stmts(isl_ctx *ctx, struct pet_scop *scop,
	__isl_keep isl_set *context)
{
	int i;
	struct cuda_stmt *stmts;

	stmts = isl_calloc_array(ctx, struct cuda_stmt, scop->n_stmt);
	assert(stmts);

	for (i = 0; i < scop->n_stmt; ++i) {
		struct cuda_stmt *s = &stmts[i];

		s->domain = isl_set_copy(scop->stmts[i]->domain);
		s->domain = isl_set_intersect_params(s->domain,
						isl_set_copy(context));
		s->body = scop->stmts[i]->body;
		pet_stmt_extract_accesses(s);
	}

	return stmts;
}

/* Replace the scop in the "input" file by equivalent code
 * that uses the GPU. "scop" is assumed to correspond to this scop.
 *
 * We first compute a schedule that respects the dependences
 * of the original program and select the outermost band
 * of tilable dimensions that has at least one parallel loop.
 * We then have three blocks of dimensions
 *
 *	H		B			G
 *
 * The tilable band "B" is first tiled according to "tile" sizes, resulting
 * in
 *
 *	H	T		P		G
 *
 * For each iteration of the T loop and for each array, we compute
 * the array elements accessed by that iteration, construct a rectangular
 * box around it and shift it to the origin. The result is used
 * as shared memory for the array.
 *
 * We then split off at most 2 parallel loops from the T loops and
 * at most 3 parallel loops from the P loops
 *
 *	H	T1	T2	P1	P2	G
 *
 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
 * according to "grid"/"block" sizes.
 *
 *	H	T1T	T1P	T2	P1T	P1P	P2	G
 *
 * Finally, the T1P and P1P iterators are equated to the block and
 * thread dimensions respectively and so are effectively removed.
 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
 * are run on the GPU.
 *
 * Code is generated in three stages. We first generate code for the
 * host (the H loops), with iterators h%d. Then, for each leaf node
 * of the resulting AST, we generate code for the shared loops (up to
 * and including T2), with iterators g%d and after equating the H loops
 * to h%d parameters and the T1P loops to the block dimensions.
 * Finally, we generate code for the remaining loops in a similar fashion.
 */
int cuda_pet(isl_ctx *ctx, struct pet_scop *scop, struct ppcg_options *options,
	const char *input)
{
	isl_union_map *sched;
	struct cuda_gen gen;

	if (!scop)
		return -1;

	scop = pet_scop_align_params(scop);

	gen.ctx = ctx;
	gen.context = isl_set_copy(scop->context);
	gen.context = add_context_from_str(gen.context, options->ctx);
	gen.sizes = extract_sizes_from_str(ctx, options->sizes);
	gen.n_stmts = scop->n_stmt;
	gen.stmts = extract_stmts(ctx, scop, gen.context);
	gen.read = pet_scop_collect_reads(scop);
	gen.write = pet_scop_collect_writes(scop);
	gen.options = options;
	gen.state = cloog_isl_state_malloc(gen.ctx);

	cuda_open_files(&gen.cuda, input);

	collect_array_info(&gen);

	sched = pet_scop_collect_schedule(scop);

	compute_schedule(&gen, sched);

	print_host_code(&gen);

	cloog_state_free(gen.state);
	clear_cuda_gen(&gen);

	cuda_close_files(&gen.cuda);

	return 0;
}