/*
 * Copyright 2010-2011 INRIA Saclay
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 */

#include <isl/polynomial.h>
#include <isl/union_set.h>
#include <isl/schedule.h>
#include <isl/options.h>
#include <cloog/isl/cloog.h>

#include "cuda_common.h"
#include "ppcg_options.h"
/* The fields stride, shift and shift_map only contain valid information
 * if shift is not NULL.
 * If so, they express that current index is such that if you add shift,
 * then the result is always a multiple of stride.
 * shift_map contains the mapping
 *
 *	i -> (i + shift)/stride
 */
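/* Illustration (values chosen purely for exposition, not taken from the
 * code): if stride is 4 and shift is 1, then the accessed indices i all
 * satisfy i + 1 = 4 * alpha for some integer alpha, and shift_map is
 *
 *	i -> (i + 1)/4
 */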
struct cuda_array_bound {
	isl_int size;
	isl_aff *lb;

	isl_int stride;
	isl_aff *shift;
	isl_basic_map *shift_map;
};

struct cuda_array_info;
/* A group of array references in a kernel that should be handled together.
 * If private_bound is not NULL, then it is mapped to registers.
 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
 * Otherwise, it is accessed from global memory.
 */
struct cuda_array_ref_group {
	/* The references in this group access this array. */
	struct cuda_array_info *array;
	/* Position of this group in the list of reference groups of array. */
	int nr;

	/* The following fields are used during the construction of the groups.
	 * access is the combined access relation relative to the shared
	 * memory tiling.
	 * write is set if any access in the group is a write.
	 */
	isl_map *access;
	int write;

	/* For each index, size and offset of piece in shared memory. */
	struct cuda_array_bound *shared_bound;

	/* For each index, size and offset of piece in private memory. */
	struct cuda_array_bound *private_bound;

	/* References in this group; point to elements of a linked list. */
	int n_ref;
	struct cuda_stmt_access **refs;
};
struct cuda_array_info {
	isl_space *dim;
	char *type;
	/* Name of the array. */
	char *name;
	/* Number of indices. */
	unsigned n_index;
	/* For each index, a bound on the array in that direction. */
	isl_pw_aff **bound;
	/* For each index, bound[i] specialized to the current kernel. */
	isl_pw_aff **local_bound;

	/* All references to this array; point to elements of a linked list. */
	int n_ref;
	struct cuda_stmt_access **refs;

	/* The reference groups associated to this array. */
	int n_group;
	struct cuda_array_ref_group **groups;

	/* For scalars, is this scalar read-only within the entire program? */
	int read_only;

	/* Last shared memory tile dimension that affects tile of this array. */
	int last_shared;
	/* Dimension at which copying to/from shared memory is printed.
	 * if >= 0, then the value is >= last_shared
	 * if -1, then the copying is done at the leaf level.
	 */
	int print_shared_level;
};
/* Print the name of the local copy of a given group of array references.
 */
static void print_array_name(FILE *out, struct cuda_array_ref_group *group)
{
	int global = 0;

	if (group->private_bound)
		fprintf(out, "private_");
	else if (group->shared_bound)
		fprintf(out, "shared_");
	else
		global = 1;
	fprintf(out, "%s", group->array->name);
	if (!global && group->array->n_group > 1)
		fprintf(out, "_%d", group->nr);
}
/* Collect all references to the given array and store pointers to them
 * in array->refs.
 */
static void collect_references(struct cuda_gen *gen,
	struct cuda_array_info *array)
{
	int i;
	int n;

	n = 0;
	for (i = 0; i < gen->n_stmts; ++i) {
		struct cuda_stmt *stmt = &gen->stmts[i];
		struct cuda_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
				isl_dim_out);
			if (name && !strcmp(array->name, name))
				n++;
		}
	}

	array->n_ref = n;
	array->refs = isl_alloc_array(gen->ctx, struct cuda_stmt_access *, n);
	assert(array->refs);

	n = 0;
	for (i = 0; i < gen->n_stmts; ++i) {
		struct cuda_stmt *stmt = &gen->stmts[i];
		struct cuda_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
				isl_dim_out);
			if (!name || strcmp(array->name, name))
				continue;

			array->refs[n++] = access;
		}
	}
}
static struct cuda_array_bound *create_bound_list(isl_ctx *ctx, int n_index)
{
	int i;
	struct cuda_array_bound *bound;

	bound = isl_alloc_array(ctx, struct cuda_array_bound, n_index);
	assert(bound);

	for (i = 0; i < n_index; ++i) {
		isl_int_init(bound[i].size);
		bound[i].lb = NULL;
		isl_int_init(bound[i].stride);
		bound[i].shift = NULL;
		bound[i].shift_map = NULL;
	}

	return bound;
}
static void free_bound_list(struct cuda_array_bound *bound, int n_index)
{
	int j;

	if (!bound)
		return;

	for (j = 0; j < n_index; ++j) {
		isl_int_clear(bound[j].size);
		isl_int_clear(bound[j].stride);
		isl_aff_free(bound[j].lb);
		isl_aff_free(bound[j].shift);
		isl_basic_map_free(bound[j].shift_map);
	}
	free(bound);
}
static struct pet_array *find_array(struct pet_scop *scop,
	__isl_keep isl_set *accessed)
{
	int i;
	isl_id *id;

	id = isl_set_get_tuple_id(accessed);

	for (i = 0; i < scop->n_array; ++i) {
		isl_id *id_i;

		id_i = isl_set_get_tuple_id(scop->arrays[i]->extent);
		isl_id_free(id_i);
		if (id_i == id)
			break;
	}
	isl_id_free(id);

	return i < scop->n_array ? scop->arrays[i] : NULL;
}
/* Compute bounds on the host arrays based on the accessed elements
 * and collect all references to the array.
 *
 * If the array is zero-dimensional, i.e., a scalar, we check
 * whether it is read-only.
 */
static int extract_array_info(__isl_take isl_set *array, void *user)
{
	int i;
	struct cuda_gen *gen = (struct cuda_gen *)user;
	const char *name;
	int n_index;
	isl_pw_aff **bounds;
	isl_pw_aff **local_bounds;
	struct pet_array *pa;

	n_index = isl_set_dim(array, isl_dim_set);
	name = isl_set_get_tuple_name(array);
	bounds = isl_alloc_array(isl_set_get_ctx(array),
		isl_pw_aff *, n_index);
	assert(bounds);
	local_bounds = isl_calloc_array(isl_set_get_ctx(array),
		isl_pw_aff *, n_index);
	assert(local_bounds);
	gen->array[gen->n_array].dim = isl_set_get_space(array);
	gen->array[gen->n_array].name = strdup(name);
	gen->array[gen->n_array].n_index = n_index;
	gen->array[gen->n_array].bound = bounds;
	gen->array[gen->n_array].local_bound = local_bounds;

	pa = find_array(gen->scop, array);
	assert(pa);

	gen->array[gen->n_array].type = strdup(pa->element_type);

	if (n_index == 0) {
		isl_set *space;
		isl_union_map *write;
		int empty;

		write = isl_union_map_copy(gen->write);
		space = isl_set_universe(isl_set_get_space(array));
		write = isl_union_map_intersect_range(write,
			isl_union_set_from_set(space));
		empty = isl_union_map_is_empty(write);
		isl_union_map_free(write);

		gen->array[gen->n_array].read_only = empty;
	}

	for (i = 0; i < n_index; ++i) {
		isl_set *dom;
		isl_local_space *ls;
		isl_aff *one;
		isl_pw_aff *bound;
		isl_set *size = i == 0 ? array : pa->extent;

		bound = isl_set_dim_max(isl_set_copy(size), i);
		assert(bound);
		dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
		ls = isl_local_space_from_space(isl_set_get_space(dom));
		one = isl_aff_zero_on_domain(ls);
		one = isl_aff_add_constant_si(one, 1);
		bound = isl_pw_aff_add(bound, isl_pw_aff_alloc(dom, one));
		bound = isl_pw_aff_gist(bound, isl_set_copy(gen->context));

		bounds[i] = bound;
	}

	collect_references(gen, &gen->array[gen->n_array]);

	gen->n_array++;

	isl_set_free(array);
	return 0;
}
void collect_array_info(struct cuda_gen *gen)
{
	isl_union_set *arrays;

	arrays = isl_union_map_range(isl_union_map_copy(gen->read));
	arrays = isl_union_set_union(arrays,
		isl_union_map_range(isl_union_map_copy(gen->write)));
	arrays = isl_union_set_coalesce(arrays);

	gen->n_array = isl_union_set_n_set(arrays);
	gen->array = isl_alloc_array(gen->ctx,
		struct cuda_array_info, gen->n_array);
	assert(gen->array);
	gen->n_array = 0;
	isl_union_set_foreach_set(arrays, &extract_array_info, gen);
	isl_union_set_free(arrays);
}
static void free_array_info(struct cuda_gen *gen)
{
	int i, j;

	for (i = 0; i < gen->n_array; ++i) {
		int n_index = gen->array[i].n_index;
		free(gen->array[i].type);
		free(gen->array[i].name);
		for (j = 0; j < n_index; ++j) {
			isl_pw_aff_free(gen->array[i].bound[j]);
			isl_pw_aff_free(gen->array[i].local_bound[j]);
		}
		isl_space_free(gen->array[i].dim);
		free(gen->array[i].bound);
		free(gen->array[i].local_bound);
		free(gen->array[i].refs);
	}
	free(gen->array);
}
/* Check if a cuda array is a scalar. A scalar is a value that is not stored
 * as an array or through a pointer reference, but as a single data element.
 * At the moment, scalars are represented as zero dimensional arrays.
 */
static int cuda_array_is_scalar(struct cuda_array_info *array)
{
	return (array->n_index == 0);
}

/* Is "array" a read-only scalar?
 */
static int cuda_array_is_read_only_scalar(struct cuda_array_info *array)
{
	return cuda_array_is_scalar(array) && array->read_only;
}
static void declare_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		if (cuda_array_is_read_only_scalar(&gen->array[i]))
			continue;
		fprintf(gen->cuda.host_c, "%s *dev_%s;\n",
			gen->array[i].type, gen->array[i].name);
	}
	fprintf(gen->cuda.host_c, "\n");
}
static void print_array_size(struct cuda_gen *gen, FILE *out,
	struct cuda_array_info *array)
{
	int i;
	isl_printer *prn;

	prn = isl_printer_to_file(gen->ctx, out);
	prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
	for (i = 0; i < array->n_index; ++i) {
		prn = isl_printer_print_str(prn, "(");
		prn = isl_printer_print_pw_aff(prn, array->bound[i]);
		prn = isl_printer_print_str(prn, ") * ");
	}
	prn = isl_printer_print_str(prn, "sizeof(");
	prn = isl_printer_print_str(prn, array->type);
	prn = isl_printer_print_str(prn, ")");
	isl_printer_free(prn);
}
static void allocate_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		if (cuda_array_is_read_only_scalar(&gen->array[i]))
			continue;
		fprintf(gen->cuda.host_c,
			"cudaCheckReturn(cudaMalloc((void **) &dev_%s, ",
			gen->array[i].name);
		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, "));\n");
	}
	fprintf(gen->cuda.host_c, "\n");
}
static void free_device_arrays(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		if (cuda_array_is_read_only_scalar(&gen->array[i]))
			continue;
		fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaFree(dev_%s));\n",
			gen->array[i].name);
	}
}
static void copy_arrays_to_device(struct cuda_gen *gen)
{
	int i;

	for (i = 0; i < gen->n_array; ++i) {
		isl_space *dim;
		isl_set *read_i;
		int empty;

		if (cuda_array_is_read_only_scalar(&gen->array[i]))
			continue;

		dim = isl_space_copy(gen->array[i].dim);
		read_i = isl_union_set_extract_set(gen->copy_in, dim);
		empty = isl_set_fast_is_empty(read_i);
		isl_set_free(read_i);
		if (empty)
			continue;

		fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaMemcpy(dev_%s,",
			gen->array[i].name);

		if (cuda_array_is_scalar(&(gen->array[i])))
			fprintf(gen->cuda.host_c, " &%s, ",
				gen->array[i].name);
		else
			fprintf(gen->cuda.host_c, " %s, ", gen->array[i].name);

		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, ", cudaMemcpyHostToDevice));\n");
	}
	fprintf(gen->cuda.host_c, "\n");
}
static void copy_arrays_from_device(struct cuda_gen *gen)
{
	int i;
	isl_union_set *write;
	write = isl_union_map_range(isl_union_map_copy(gen->write));

	for (i = 0; i < gen->n_array; ++i) {
		isl_space *dim;
		isl_set *write_i;
		int empty;

		dim = isl_space_copy(gen->array[i].dim);
		write_i = isl_union_set_extract_set(write, dim);
		empty = isl_set_fast_is_empty(write_i);
		isl_set_free(write_i);
		if (empty)
			continue;

		fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaMemcpy(");
		if (cuda_array_is_scalar(&gen->array[i]))
			fprintf(gen->cuda.host_c, "&%s, ", gen->array[i].name);
		else
			fprintf(gen->cuda.host_c, "%s, ", gen->array[i].name);
		fprintf(gen->cuda.host_c, "dev_%s, ", gen->array[i].name);
		print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
		fprintf(gen->cuda.host_c, ", cudaMemcpyDeviceToHost));\n");
	}

	isl_union_set_free(write);
	fprintf(gen->cuda.host_c, "\n");
}
static void read_sizes_from_file(struct cuda_gen *gen, const char *filename,
	int *sizes, int len)
{
	int i;
	FILE *file;

	file = fopen(filename, "r");
	assert(file);

	for (i = 0; i < len; ++i)
		if (fscanf(file, "%d", &sizes[i]) < 1)
			break;

	fclose(file);
}
/* Internal data structure for extract_size_of_type.
 * "type" specifies the name of the space that we want to extract.
 * "res" is used to store the subset of that space.
 */
struct ppcg_extract_size_data {
	const char *type;
	isl_set *res;
};

/* This function is called for each set in a union_set.
 * If the name of the set matches data->type, we store the
 * set in data->res.
 */
static int extract_size_of_type(__isl_take isl_set *size, void *user)
{
	struct ppcg_extract_size_data *data = user;
	const char *name;

	name = isl_set_get_tuple_name(size);
	if (name && !strcmp(name, data->type)) {
		data->res = size;
		return -1;
	}

	isl_set_free(size);
	return 0;
}
/* Given a union map { kernel[i] -> *[...] },
 * return the range in the space called "type" for the kernel with
 * sequence number "id".
 */
static __isl_give isl_set *extract_sizes(__isl_keep isl_union_map *sizes,
	const char *type, int id)
{
	isl_space *space;
	isl_set *dom;
	isl_union_set *local_sizes;
	struct ppcg_extract_size_data data = { type, NULL };

	space = isl_union_map_get_space(sizes);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, 1);
	space = isl_space_set_tuple_name(space, isl_dim_set, "kernel");
	dom = isl_set_universe(space);
	dom = isl_set_fix_si(dom, isl_dim_set, 0, id);

	local_sizes = isl_union_set_apply(isl_union_set_from_set(dom),
		isl_union_map_copy(sizes));
	isl_union_set_foreach_set(local_sizes, &extract_size_of_type, &data);
	isl_union_set_free(local_sizes);

	return data.res;
}
/* Given a singleton set, extract the first (at most *len) elements
 * of the single integer tuple into *sizes and update *len if needed.
 */
static void read_sizes_from_set(__isl_take isl_set *set, int *sizes, int *len)
{
	int i;
	int dim;
	isl_int v;

	if (!set)
		return;

	dim = isl_set_dim(set, isl_dim_set);
	if (dim < *len)
		*len = dim;

	isl_int_init(v);

	for (i = 0; i < *len; ++i) {
		int ok;

		ok = isl_set_plain_is_fixed(set, isl_dim_set, i, &v);
		assert(ok);

		sizes[i] = isl_int_get_si(v);
	}

	isl_int_clear(v);
	isl_set_free(set);
}
/* Extract user specified "tile" sizes from the "sizes" command line option,
 * defaulting to option->tile_size in each dimension.
 */
static void read_tile_sizes(struct cuda_gen *gen)
{
	int n;
	isl_set *size;

	gen->tile_size = isl_alloc_array(gen->ctx, int, gen->tile_len);
	assert(gen->tile_size);
	for (n = 0; n < gen->tile_len; ++n)
		gen->tile_size[n] = gen->options->tile_size;

	size = extract_sizes(gen->sizes, "tile", gen->kernel_id);
	read_sizes_from_set(size, gen->tile_size, &gen->tile_len);

	if (gen->n_parallel > gen->tile_len)
		gen->n_parallel = gen->tile_len;
}
/* Extract user specified "block" sizes from the "sizes" command line option,
 * after filling in some potentially useful defaults.
 */
static void read_block_sizes(struct cuda_gen *gen)
{
	int n = gen->n_parallel;
	isl_set *size;

	gen->n_block = (n <= 3) ? n : 3;
	switch (gen->n_block) {
	case 1:
		gen->block_dim[0] = 512;
		break;
	case 2:
		gen->block_dim[0] = 32;
		gen->block_dim[1] = 16;
		break;
	default:
		gen->block_dim[0] = 32;
		gen->block_dim[1] = 4;
		gen->block_dim[2] = 4;
		break;
	}

	size = extract_sizes(gen->sizes, "block", gen->kernel_id);
	read_sizes_from_set(size, gen->block_dim, &gen->n_block);
}
/* Extract user specified "grid" sizes from the "sizes" command line option,
 * after filling in some potentially useful defaults.
 */
static void read_grid_sizes(struct cuda_gen *gen)
{
	int n = gen->n_parallel;
	isl_set *size;

	gen->n_grid = (n <= 2) ? n : 2;
	switch (gen->n_grid) {
	case 1:
		gen->grid_dim[0] = 32768;
		break;
	default:
		gen->grid_dim[0] = 256;
		gen->grid_dim[1] = 256;
		break;
	}

	size = extract_sizes(gen->sizes, "grid", gen->kernel_id);
	read_sizes_from_set(size, gen->grid_dim, &gen->n_grid);
}
/* Extract user specified sizes from the "sizes" command line option
 * after filling in some potentially useful defaults.
 */
static void read_sizes(struct cuda_gen *gen)
{
	read_tile_sizes(gen);
	read_block_sizes(gen);
	read_grid_sizes(gen);
}
static void free_stmts(struct cuda_stmt *stmts, int n)
{
	int i;

	for (i = 0; i < n; ++i) {
		struct cuda_stmt_access *access, *next;

		for (access = stmts[i].accesses; access; access = next) {
			next = access->next;
			isl_map_free(access->access);
			free(access);
		}

		isl_set_free(stmts[i].domain);
	}
	free(stmts);
}
void clear_cuda_gen(struct cuda_gen *gen)
{
	free_stmts(gen->stmts, gen->n_stmts);
	free_array_info(gen);
	isl_union_map_free(gen->sizes);
	isl_set_free(gen->context);
	isl_union_set_free(gen->copy_in);
	isl_union_map_free(gen->sched);
	isl_union_map_free(gen->read);
	isl_union_map_free(gen->write);
}
static void print_reverse_list(FILE *out, int len, int *list)
{
	int i;

	for (i = 0; i < len; ++i) {
		fprintf(out, "%d", list[len - 1 - i]);
	}
}
static void print_kernel_launch(struct cuda_gen *gen,
	__isl_keep isl_union_set *arrays)
{
	int i;
	int first = 1;
	unsigned nparam;
	isl_space *dim;

	print_indent(gen->code.dst, gen->code.indent);
	fprintf(gen->code.dst, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
		gen->kernel_id, gen->kernel_id, gen->kernel_id);
	fprintf(gen->cuda.kernel_c, "__global__ void kernel%d(",
		gen->kernel_id);
	fprintf(gen->cuda.kernel_h, "__global__ void kernel%d(",
		gen->kernel_id);

	for (i = 0; i < gen->n_array; ++i) {
		isl_set *arr;
		int empty;

		dim = isl_space_copy(gen->array[i].dim);
		arr = isl_union_set_extract_set(arrays, dim);
		empty = isl_set_fast_is_empty(arr);
		isl_set_free(arr);
		if (empty)
			continue;

		if (!first) {
			fprintf(gen->code.dst, ", ");
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_h, ", ");
		}

		if (cuda_array_is_read_only_scalar(&gen->array[i])) {
			fprintf(gen->code.dst, "%s", gen->array[i].name);
			fprintf(gen->cuda.kernel_c, "%s %s",
				gen->array[i].type, gen->array[i].name);
			fprintf(gen->cuda.kernel_h, "%s %s",
				gen->array[i].type, gen->array[i].name);
		} else {
			fprintf(gen->code.dst, "dev_%s", gen->array[i].name);
			fprintf(gen->cuda.kernel_c, "%s *%s",
				gen->array[i].type, gen->array[i].name);
			fprintf(gen->cuda.kernel_h, "%s *%s",
				gen->array[i].type, gen->array[i].name);
		}

		first = 0;
	}

	dim = isl_union_set_get_space(arrays);
	nparam = isl_space_dim(dim, isl_dim_param);
	for (i = 0; i < nparam; ++i) {
		const char *name = isl_space_get_dim_name(dim, isl_dim_param, i);

		if (!first) {
			fprintf(gen->code.dst, ", ");
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_h, ", ");
		}
		fprintf(gen->code.dst, "%s", name);
		fprintf(gen->cuda.kernel_c, "int %s", name);
		fprintf(gen->cuda.kernel_h, "int %s", name);

		first = 0;
	}
	isl_space_free(dim);

	for (i = 0; i < gen->tile_first; ++i) {
		if (!first) {
			fprintf(gen->code.dst, ", ");
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_h, ", ");
		}
		fprintf(gen->code.dst, "h%d", i);
		fprintf(gen->cuda.kernel_c, "int h%d", i);
		fprintf(gen->cuda.kernel_h, "int h%d", i);

		first = 0;
	}

	fprintf(gen->code.dst, ");\n");
	fprintf(gen->cuda.kernel_c, ")\n");
	fprintf(gen->cuda.kernel_h, ");\n");

	fprintf(gen->code.dst, "cudaCheckKernel();\n");
}
/* Construct a map from a domain of dimensionality "len"
 * to a domain of dimensionality "len" + "tile_len" that tiles
 * the "tile_len" coordinates starting at "first".
 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
 * "dim" prescribes the parameters.
 */
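/* As a concrete illustration (example values only, not taken from the code):
 * with len = 1, first = 0, tile_len = 1 and tile_size[0] = 32, the
 * constructed map is
 *
 *	[s_0] -> [t_0, p_0] : t_0 = floor(s_0/32) and p_0 = s_0 mod 32
 */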
static __isl_give isl_map *tile(__isl_take isl_space *dim, int len,
	int first, int tile_len, int *tile_size)
{
	int i;
	isl_int v;
	isl_basic_map *bmap;
	isl_constraint *c;
	isl_local_space *ls;

	isl_int_init(v);

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len + tile_len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len - tile_len; ++i) {
		int j = i < first ? i : i + tile_len;
		int k = i < first ? i : i + 2 * tile_len;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_in, j, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out, k, v);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	for (i = 0; i < tile_len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_in, first + i, v);
		isl_int_set_si(v, tile_size[i]);
		isl_constraint_set_coefficient(c, isl_dim_out, first + i, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out,
			first + i + tile_len, v);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out,
			first + i + tile_len, v);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_out,
			first + i + tile_len, v);
		isl_int_set_si(v, tile_size[i] - 1);
		isl_constraint_set_constant(c, v);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	isl_local_space_free(ls);
	isl_int_clear(v);

	return isl_map_from_basic_map(bmap);
}
/* Construct a map from a domain of dimensionality "len"
 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
 * that are projected out at the end.
 * "dim" prescribes the parameters.
 */
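/* As a concrete illustration (example values only, not taken from the code):
 * with len = 1, first = 0, wrap_len = 1 and wrap_size[0] = 16, the result is
 *
 *	[s_0] -> [s_0, s_0 mod 16]
 *
 * computed by introducing a temporary output dimension equal to
 * floor(s_0/16) and projecting it out again.
 */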
static __isl_give isl_map *wrap(__isl_take isl_space *dim, int len,
	int first, int wrap_len, int *wrap_size)
{
	int i;
	isl_basic_map *bmap;
	isl_constraint *c;
	isl_local_space *ls;

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len + 2 * wrap_len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		int k = i < first + wrap_len ? i : i + 2 * wrap_len;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	for (i = 0; i < wrap_len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
			first + i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out,
			first + wrap_len + i, 1);
		isl_constraint_set_coefficient_si(c, isl_dim_out,
			first + 2 * wrap_len + i, wrap_size[i]);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
			first + wrap_len + i, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
			first + wrap_len + i, -1);
		isl_constraint_set_constant_si(c, wrap_size[i] - 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	isl_local_space_free(ls);

	bmap = isl_basic_map_project_out(bmap, isl_dim_out,
		first + 2 * wrap_len, wrap_len);

	return isl_map_from_basic_map(bmap);
}
/* Add "n" parameters named prefix%d.
 */
static __isl_give isl_set *add_params(__isl_take isl_set *set,
	int n, const char *prefix)
{
	int i;
	unsigned nparam;
	char name[20];

	nparam = isl_set_dim(set, isl_dim_param);
	set = isl_set_add_dims(set, isl_dim_param, n);

	for (i = 0; i < n; ++i) {
		snprintf(name, sizeof(name), "%s%d", prefix, i);
		set = isl_set_set_dim_name(set, isl_dim_param,
			nparam + i, name);
	}

	return set;
}
/* Equate the "n" dimensions of "set" starting at "first" to
 * freshly created parameters named prefix%d.
 */
static __isl_give isl_set *parametrize(__isl_take isl_set *set,
	int first, int n, const char *prefix)
{
	int i;
	unsigned nparam;
	isl_int v;
	isl_space *dim;
	isl_basic_set *bset;
	isl_constraint *c;
	isl_local_space *ls;

	nparam = isl_set_dim(set, isl_dim_param);

	set = add_params(set, n, prefix);

	dim = isl_set_get_space(set);
	bset = isl_basic_set_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	isl_int_init(v);

	for (i = 0; i < n; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_set, first + i, v);
		bset = isl_basic_set_add_constraint(bset, c);
	}

	isl_int_clear(v);
	isl_local_space_free(ls);

	return isl_set_intersect(set, isl_set_from_basic_set(bset));
}
static __isl_give isl_set *parametrization(__isl_take isl_space *dim,
	int len, int first, int n, const char *prefix)
{
	isl_set *set;

	dim = isl_space_add_dims(dim, isl_dim_set, len);
	set = isl_set_universe(dim);

	return parametrize(set, first, n, prefix);
}
/* Tile the B loops over the tile sizes and then tile/wrap
 * the T1 loops over the blocks.
 */
1007 static __isl_give isl_union_map
*tile_schedule(struct cuda_gen
*gen
,
1008 __isl_take isl_union_map
*sched
)
1011 isl_map
*tiling
, *block_tiling
;
1013 dim
= isl_union_map_get_space(sched
);
1014 tiling
= tile(isl_space_copy(dim
), gen
->untiled_len
,
1015 gen
->tile_first
, gen
->tile_len
, gen
->tile_size
);
1017 if (gen
->options
->wrap
)
1018 block_tiling
= wrap(dim
, gen
->untiled_len
+ gen
->tile_len
,
1019 gen
->tile_first
, gen
->n_grid
, gen
->grid_dim
);
1021 block_tiling
= tile(dim
, gen
->untiled_len
+ gen
->tile_len
,
1022 gen
->tile_first
, gen
->n_grid
, gen
->grid_dim
);
1024 gen
->tiled_len
= gen
->untiled_len
+ gen
->tile_len
+ gen
->n_grid
;
1026 tiling
= isl_map_apply_range(tiling
, block_tiling
);
1028 sched
= isl_union_map_apply_range(sched
,
1029 isl_union_map_from_map(tiling
));
1031 gen
->shared_len
= gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
;
1036 static __isl_give isl_union_map
*parametrize_tiled_schedule(
1037 struct cuda_gen
*gen
, __isl_take isl_union_map
*sched
)
1042 dim
= isl_union_map_get_space(sched
);
1043 par
= parametrization(dim
, gen
->tiled_len
, 0, gen
->tile_first
, "h");
1044 sched
= isl_union_map_intersect_range(sched
,
1045 isl_union_set_from_set(par
));
1047 dim
= isl_union_map_get_space(sched
);
1048 par
= parametrization(dim
, gen
->tiled_len
,
1049 gen
->tile_first
+ gen
->n_grid
, gen
->n_grid
, "b");
1050 sched
= isl_union_map_intersect_range(sched
,
1051 isl_union_set_from_set(par
));
1056 /* Tile/wrap the P1 loops over the threads.
1058 static __isl_give isl_union_map
*thread_tile_schedule(struct cuda_gen
*gen
,
1059 __isl_take isl_union_map
*sched
)
1065 dim
= isl_union_map_get_space(sched
);
1067 if (gen
->options
->wrap
)
1068 tiling
= wrap(isl_space_copy(dim
), gen
->tiled_len
,
1069 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
1071 tiling
= tile(isl_space_copy(dim
), gen
->tiled_len
,
1072 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
1073 gen
->thread_tiled_len
= gen
->tiled_len
+ gen
->n_block
;
1075 sched
= isl_union_map_apply_range(sched
,
1076 isl_union_map_from_map(tiling
));
1078 par
= parametrization(dim
, gen
->thread_tiled_len
,
1079 gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
+ gen
->n_block
,
1081 sched
= isl_union_map_intersect_range(sched
,
1082 isl_union_set_from_set(par
));
1084 gen
->shared_len
= gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
;
/* If the user asked for it, scale the shared memory tile loops
 * (T1T and T2) of "sched" by gen->tile_size[i].
 * If we are not performing "wrapping", then additionally scale the T1P
 * loops by gen->grid_dim[i].
 */
1094 static __isl_give isl_union_map
*scale_tile_loops(struct cuda_gen
*gen
,
1095 __isl_take isl_union_map
*sched
)
1099 isl_basic_map
*scale
;
1101 isl_local_space
*ls
;
1103 if (!gen
->options
->scale_tile_loops
)
1106 dim
= isl_union_map_get_space(sched
);
1107 dim
= isl_space_add_dims(dim
, isl_dim_in
, gen
->tiled_len
);
1108 dim
= isl_space_add_dims(dim
, isl_dim_out
, gen
->tiled_len
);
1109 scale
= isl_basic_map_universe(isl_space_copy(dim
));
1110 ls
= isl_local_space_from_space(dim
);
1112 for (i
= 0; i
< gen
->tiled_len
; ++i
) {
1115 if (i
>= gen
->tile_first
&& i
< gen
->tile_first
+ gen
->n_grid
) {
1116 f
= gen
->tile_size
[i
- gen
->tile_first
];
1117 if (!gen
->options
->wrap
)
1118 f
*= gen
->grid_dim
[i
- gen
->tile_first
];
1119 } else if (i
>= gen
->tile_first
+ gen
->n_grid
&&
1120 i
< gen
->tile_first
+ gen
->n_grid
+ gen
->tile_len
) {
1121 f
= gen
->tile_size
[i
- (gen
->tile_first
+ gen
->n_grid
)];
1124 c
= isl_equality_alloc(isl_local_space_copy(ls
));
1125 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
1126 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
1127 scale
= isl_basic_map_add_constraint(scale
, c
);
1130 isl_local_space_free(ls
);
1132 sched
= isl_union_map_apply_range(sched
,
1133 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
/* If we are not performing "wrapping" and if the user asked for it,
 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
 */
1141 static __isl_give isl_union_map
*scale_thread_tile_loops(struct cuda_gen
*gen
,
1142 __isl_take isl_union_map
*sched
)
1146 isl_basic_map
*scale
;
1148 isl_local_space
*ls
;
1150 if (gen
->options
->wrap
)
1152 if (!gen
->options
->scale_tile_loops
)
1155 dim
= isl_union_map_get_space(sched
);
1156 dim
= isl_space_add_dims(dim
, isl_dim_in
, gen
->thread_tiled_len
);
1157 dim
= isl_space_add_dims(dim
, isl_dim_out
, gen
->thread_tiled_len
);
1158 scale
= isl_basic_map_universe(isl_space_copy(dim
));
1159 ls
= isl_local_space_from_space(dim
);
1161 for (i
= 0; i
< gen
->thread_tiled_len
; ++i
) {
1164 if (i
>= gen
->shared_len
&&
1165 i
< gen
->shared_len
+ gen
->n_block
)
1166 f
= gen
->block_dim
[i
- gen
->shared_len
];
1168 c
= isl_equality_alloc(isl_local_space_copy(ls
));
1169 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
1170 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
1171 scale
= isl_basic_map_add_constraint(scale
, c
);
1174 isl_local_space_free(ls
);
1176 sched
= isl_union_map_apply_range(sched
,
1177 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
/* If we are not performing "wrapping" and if the user asked for it,
 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
 */
1185 static __isl_give isl_union_map
*scale_access_tile_loops(struct cuda_gen
*gen
,
1186 __isl_take isl_union_map
*sched
, int len
, int first
, int n_tile
)
1190 isl_basic_map
*scale
;
1192 isl_local_space
*ls
;
1194 if (gen
->options
->wrap
)
1196 if (!gen
->options
->scale_tile_loops
)
1199 dim
= isl_union_map_get_space(sched
);
1200 dim
= isl_space_add_dims(dim
, isl_dim_in
, len
);
1201 dim
= isl_space_add_dims(dim
, isl_dim_out
, len
);
1202 scale
= isl_basic_map_universe(isl_space_copy(dim
));
1203 ls
= isl_local_space_from_space(dim
);
1205 for (i
= 0; i
< len
; ++i
) {
1208 if (i
>= first
&& i
< first
+ n_tile
)
1209 f
= gen
->block_dim
[i
- first
];
1211 c
= isl_equality_alloc(isl_local_space_copy(ls
));
1212 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
1213 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
1214 scale
= isl_basic_map_add_constraint(scale
, c
);
1217 isl_local_space_free(ls
);
1219 sched
= isl_union_map_apply_range(sched
,
1220 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
/* If print_user_stmt is set, we want to print the statements ourselves,
 * instead of relying on the C preprocessor. If so, we need to use
 * the stop option so that the domains will be saved on the statement
 * nodes.
 */
1230 static void print_cloog_shared_body(struct cuda_gen
*gen
,
1231 __isl_keep isl_set
*context
, __isl_keep isl_union_map
*sched
, int len
,
1232 void (*print_user_stmt
)(struct gpucode_info
*info
,
1233 struct clast_user_stmt
*s
),
1237 CloogOptions
*options
;
1238 CloogDomain
*cloog_context
;
1239 CloogUnionDomain
*ud
;
1241 struct clast_stmt
*stmt
;
1244 sched
= isl_union_map_copy(sched
);
1245 sched
= isl_union_map_align_params(sched
, isl_set_get_space(context
));
1247 options
= cloog_options_malloc(gen
->state
);
1248 options
->language
= CLOOG_LANGUAGE_C
;
1249 options
->strides
= 1;
1253 options
->override
= 1;
1254 options
->save_domains
= 1;
1255 options
->noscalars
= 1;
1256 options
->first_unroll
= first_unroll
;
1258 ud
= cloog_union_domain_from_isl_union_map(sched
);
1259 for (i
= 0; i
< len
; ++i
) {
1260 snprintf(name
, sizeof(name
), "c%d", i
);
1261 ud
= cloog_union_domain_set_name(ud
, CLOOG_SCAT
, i
, name
);
1263 cloog_context
= cloog_domain_from_isl_set(isl_set_copy(context
));
1264 input
= cloog_input_alloc(cloog_context
, ud
);
1266 stmt
= cloog_clast_create_from_input(input
, options
);
1268 gen
->stmt_code
.indent
= gen
->kernel_code
.indent
;
1269 gen
->stmt_code
.dst
= gen
->cuda
.kernel_c
;
1270 gen
->stmt_code
.print_user_stmt
= print_user_stmt
;
1271 gen
->stmt_code
.print_user_stmt_list
= NULL
;
1272 gen
->stmt_code
.print_for_head
= NULL
;
1273 gen
->stmt_code
.print_for_foot
= NULL
;
1274 gen
->stmt_code
.user
= gen
;
1275 gpu_print_host_stmt(&gen
->stmt_code
, stmt
);
1277 cloog_clast_free(stmt
);
1278 cloog_options_free(options
);
/* Add "len" parameters p[i] called prefix%d,
 * with bounds 0 <= p[i] < size[i].
 */
1284 __isl_give isl_set
*add_bounded_parameters(__isl_take isl_set
*set
,
1285 int len
, int *size
, const char *prefix
)
1291 isl_basic_set
*bset
;
1293 isl_local_space
*ls
;
1296 nparam
= isl_set_dim(set
, isl_dim_param
);
1297 set
= isl_set_add_dims(set
, isl_dim_param
, len
);
1299 for (i
= 0; i
< len
; ++i
) {
1300 snprintf(name
, sizeof(name
), "%s%d", prefix
, i
);
1301 set
= isl_set_set_dim_name(set
, isl_dim_param
,
1305 dim
= isl_set_get_space(set
);
1306 bset
= isl_basic_set_universe(isl_space_copy(dim
));
1307 ls
= isl_local_space_from_space(dim
);
1311 for (i
= 0; i
< len
; ++i
) {
1312 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
1313 isl_int_set_si(v
, 1);
1314 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
1315 bset
= isl_basic_set_add_constraint(bset
, c
);
1317 c
= isl_inequality_alloc(isl_local_space_copy(ls
));
1318 isl_int_set_si(v
, -1);
1319 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
1320 isl_int_set_si(v
, size
[i
] - 1);
1321 isl_constraint_set_constant(c
, v
);
1322 bset
= isl_basic_set_add_constraint(bset
, c
);
1326 isl_local_space_free(ls
);
1328 return isl_set_intersect(set
, isl_set_from_basic_set(bset
));
1331 static void print_shared_body(struct cuda_gen
*gen
,
1332 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*sched
,
1333 int len
, void (*print_user_stmt
)(struct gpucode_info
*info
,
1334 struct clast_user_stmt
*s
),
1339 context
= isl_set_copy(shared_domain
);
1340 context
= parametrize(context
, 0, gen
->shared_len
, "g");
1341 context
= isl_set_project_out(context
, isl_dim_set
, 0, gen
->shared_len
);
1342 context
= add_bounded_parameters(context
,
1343 gen
->n_block
, gen
->block_dim
, "t");
1345 print_cloog_shared_body(gen
, context
, sched
, len
, print_user_stmt
,
1348 isl_set_free(context
);
/* Given a tile of an array, construct a map that maps each element
 * of the tile to a copy of the tile shifted to the origin
 * (based on the lower bounds in group->private_bound or group->shared_bound).
 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
 * is applied to the index first.
 * The domain of the resulting map is "access",
 * while the range space is anonymous.
 */
1359 static __isl_give isl_map
*shift_access(__isl_take isl_set
*access
,
1360 struct cuda_array_ref_group
*group
)
1364 isl_basic_set
*bset
;
1365 isl_basic_map
*bmap
;
1367 isl_basic_set
*offset
;
1368 isl_basic_map
*shift
;
1369 isl_basic_map
*pre_shift
;
1372 struct cuda_array_bound
*bounds
;
1373 int n_index
= group
->array
->n_index
;
1375 bounds
= group
->private_bound
;
1377 bounds
= group
->shared_bound
;
1379 dim
= isl_set_get_space(access
);
1380 dim
= isl_space_drop_dims(dim
, isl_dim_set
, 0, n_index
);
1381 offset
= isl_basic_set_universe(dim
);
1382 for (i
= 0; i
< n_index
; ++i
) {
1383 lb
= isl_aff_copy(bounds
[i
].lb
);
1384 bmap
= isl_basic_map_from_aff(lb
);
1385 bset
= isl_basic_map_range(bmap
);
1386 offset
= isl_basic_set_flat_product(offset
, bset
);
1388 offset
= isl_basic_set_neg(offset
);
1390 dim
= isl_space_map_from_set(isl_set_get_space(access
));
1391 shift
= isl_basic_map_identity(dim
);
1392 shift
= isl_basic_map_set_tuple_name(shift
, isl_dim_out
, NULL
);
1394 bset
= isl_basic_set_universe(isl_set_get_space(access
));
1395 bmap
= isl_basic_map_from_domain_and_range(bset
, offset
);
1397 shift
= isl_basic_map_sum(shift
, bmap
);
1399 dim
= isl_set_get_space(access
);
1400 dim
= isl_space_drop_dims(dim
, isl_dim_set
, 0, n_index
);
1401 dim
= isl_space_map_from_set(dim
);
1402 pre_shift
= isl_basic_map_universe(isl_space_copy(dim
));
1403 dim
= isl_space_add_dims(dim
, isl_dim_in
, 1);
1404 dim
= isl_space_add_dims(dim
, isl_dim_out
, 1);
1405 for (i
= 0; i
< n_index
; ++i
) {
1406 if (!bounds
[i
].shift_map
)
1407 bmap
= isl_basic_map_identity(isl_space_copy(dim
));
1409 bmap
= isl_basic_map_copy(bounds
[i
].shift_map
);
1410 pre_shift
= isl_basic_map_flat_product(pre_shift
, bmap
);
1412 isl_space_free(dim
);
1413 name
= isl_basic_map_get_tuple_name(shift
, isl_dim_in
);
1414 pre_shift
= isl_basic_map_set_tuple_name(pre_shift
, isl_dim_in
, name
);
1415 pre_shift
= isl_basic_map_set_tuple_name(pre_shift
, isl_dim_out
, name
);
1416 shift
= isl_basic_map_apply_range(pre_shift
, shift
);
1418 sched
= isl_map_from_basic_map(shift
);
1419 sched
= isl_map_intersect_domain(sched
, access
);
/* Construct a schedule for iterating over all elements in the given
 * piece of an array. The schedule iterates over a copy of the piece
 * that is shifted to the origin.
 * We subsequently also perform the tiling/wrapping over the threads.
 *
 * In particular, we tile the final iterators so that the final thread
 * dimension runs over the final array dimension.
 * However, if those final iterators have only a single iteration,
 * we try to tile earlier iterators instead.
 */
1434 static __isl_give isl_union_map
*access_schedule(struct cuda_gen
*gen
,
1435 __isl_take isl_set
*access
, struct cuda_array_ref_group
*group
)
1439 isl_union_map
*usched
;
1442 unsigned nvar
= isl_set_dim(access
, isl_dim_set
);
1446 sched
= shift_access(access
, group
);
1448 n_tile
= gen
->n_block
;
1449 if (n_tile
> nvar
) {
1451 sched
= isl_map_insert_dims(sched
,
1452 isl_dim_out
, 0, n_tile
- nvar
);
1453 for (i
= 0; i
< n_tile
- nvar
; ++i
)
1454 sched
= isl_map_fix_si(sched
, isl_dim_out
, i
, 0);
1458 first
= nvar
- n_tile
;
1460 for (; first
> 0; first
--)
1461 if (!isl_map_plain_is_fixed(sched
, isl_dim_out
,
1462 first
+ n_tile
- 1, NULL
))
1465 dim
= isl_map_get_space(sched
);
1466 dim
= isl_space_params(dim
);
1467 if (gen
->options
->wrap
)
1468 tiling
= wrap(isl_space_copy(dim
), nvar
, first
,
1469 n_tile
, gen
->block_dim
);
1471 tiling
= tile(isl_space_copy(dim
), nvar
, first
,
1472 n_tile
, gen
->block_dim
);
1473 sched
= isl_map_apply_range(sched
, tiling
);
1475 par
= parametrization(dim
, nvar
+ n_tile
, first
+ n_tile
, n_tile
, "t");
1476 usched
= isl_union_map_from_map(sched
);
1477 usched
= isl_union_map_intersect_range(usched
,
1478 isl_union_set_from_set(par
));
1480 usched
= scale_access_tile_loops(gen
, usched
, nvar
+ n_tile
,
/* Print an access to the element in the global memory copy of the
 * given array that corresponds to the element described by "pma"
 * of the original array.
 * The copy in global memory has been linearized, so we need to take
 * the array size into account.
 */
1492 static void print_global_index(FILE *out
,
1493 struct cuda_array_info
*array
, __isl_keep isl_pw_multi_aff
*pma
,
1494 __isl_keep isl_set
*domain
)
1497 isl_ctx
*ctx
= isl_pw_multi_aff_get_ctx(pma
);
1500 if (cuda_array_is_scalar(array
)) {
1501 fprintf(out
, "*%s", array
->name
);
1505 fprintf(out
, "%s[", array
->name
);
1506 prn
= isl_printer_to_file(ctx
, out
);
1507 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
1508 for (i
= 0; i
+ 1 < array
->n_index
; ++i
)
1509 prn
= isl_printer_print_str(prn
, "(");
1510 for (i
= 0; i
< array
->n_index
; ++i
) {
1511 isl_pw_aff
*pa
= isl_pw_multi_aff_get_pw_aff(pma
, i
);
1512 pa
= isl_pw_aff_coalesce(pa
);
1513 pa
= isl_pw_aff_gist(pa
, isl_set_copy(domain
));
1515 prn
= isl_printer_print_str(prn
, ") * (");
1516 prn
= isl_printer_print_pw_aff(prn
,
1517 array
->local_bound
[i
]);
1518 prn
= isl_printer_print_str(prn
, ") + ");
1520 prn
= isl_printer_print_pw_aff(prn
, pa
);
1521 isl_pw_aff_free(pa
);
1523 isl_printer_free(prn
);
/* Given an index expression into a tile of an array, adjust the expression
 * to a shift of the tile to the origin
 * (based on the lower bounds in array->shared_bound).
 * If the index is strided, then we first add
 * bound->shift and divide by bound->stride.
 */
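/* For example (values picked only to illustrate): if the index is not
 * strided and the tile's lower bound is 32 * b0, then an index expression
 * a is rewritten to a - 32 * b0, i.e., its position within the shifted tile.
 */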
1533 static __isl_give isl_pw_aff
*shift_index(__isl_take isl_pw_aff
*pa
,
1534 struct cuda_array_info
*array
,
1535 struct cuda_array_bound
*bound
, __isl_take isl_set
*domain
)
1542 shift
= bound
->shift
;
1543 shift
= isl_aff_copy(shift
);
1544 shift
= isl_aff_project_domain_on_params(shift
);
1545 shift
= isl_aff_align_params(shift
, isl_pw_aff_get_space(pa
));
1546 tmp
= isl_pw_aff_alloc(isl_set_copy(domain
), shift
);
1547 pa
= isl_pw_aff_add(pa
, tmp
);
1548 pa
= isl_pw_aff_scale_down(pa
, bound
->stride
);
1551 lb
= isl_aff_copy(bound
->lb
);
1552 lb
= isl_aff_project_domain_on_params(lb
);
1554 lb
= isl_aff_align_params(lb
, isl_pw_aff_get_space(pa
));
1556 tmp
= isl_pw_aff_alloc(isl_set_copy(domain
), lb
);
1557 pa
= isl_pw_aff_sub(pa
, tmp
);
1558 pa
= isl_pw_aff_coalesce(pa
);
1559 pa
= isl_pw_aff_gist(pa
, domain
);
/* Print an access to the element in the private/shared memory copy of the
 * given array reference group that corresponds to the element described
 * by "pma" of the original array.
 * Since the array in private/shared memory is just a shifted copy of part
 * of the original array, we simply need to subtract the lower bound,
 * which was computed in can_tile_for_shared_memory.
 * If any of the indices is strided, then we first add
 * bounds[i].shift and divide by bounds[i].stride.
 */
1573 static void print_local_index(FILE *out
,
1574 struct cuda_array_ref_group
*group
, struct cuda_array_bound
*bounds
,
1575 __isl_keep isl_pw_multi_aff
*pma
, __isl_keep isl_set
*domain
)
1578 isl_ctx
*ctx
= isl_pw_multi_aff_get_ctx(pma
);
1580 struct cuda_array_info
*array
= group
->array
;
1582 print_array_name(out
, group
);
1583 for (i
= 0; i
< array
->n_index
; ++i
) {
1584 isl_pw_aff
*pa
= isl_pw_multi_aff_get_pw_aff(pma
, i
);
1586 pa
= shift_index(pa
, array
, &bounds
[i
], isl_set_copy(domain
));
1589 prn
= isl_printer_to_file(ctx
, out
);
1590 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
1591 prn
= isl_printer_print_pw_aff(prn
, pa
);
1592 isl_printer_free(prn
);
1594 isl_pw_aff_free(pa
);
/* This function is called for each leaf in the clast of the code
 * for copying to or from shared/private memory.
 * The statement name is {read,write}_{shared,private}_<array>.
 *
 * The schedule iterates over the array elements, so we can use
 * the domain of copy_sched at the current scheduling position
 * as the index of the array.
 */
1606 static void print_copy_statement(struct gpucode_info
*code
,
1607 struct clast_user_stmt
*u
)
1609 struct cuda_gen
*gen
= code
->user
;
1612 struct cuda_array_ref_group
*group
= gen
->copy_group
;
1613 struct cuda_array_bound
*bounds
= gen
->copy_bound
;
1620 isl_pw_multi_aff
*pma
;
1623 read
= !strncmp(u
->statement
->name
, "read", 4);
1625 domain
= extract_host_domain(u
);
1628 sched
= isl_map_copy(gen
->copy_sched
);
1629 sched
= isl_map_reverse(sched
);
1630 sched
= isl_map_intersect_domain(sched
, domain
);
1631 n_in
= isl_map_dim(sched
, isl_dim_in
);
1632 n_out
= isl_map_dim(sched
, isl_dim_out
);
1633 dim
= isl_map_get_space(sched
);
1634 dim
= isl_space_drop_dims(dim
, isl_dim_in
, 0, n_in
);
1635 dim
= isl_space_drop_dims(dim
, isl_dim_out
, 0, n_out
);
1636 param
= parametrization(dim
, n_in
, 0, n_in
, "c");
1637 sched
= isl_map_align_params(sched
, isl_set_get_space(param
));
1638 sched
= isl_map_intersect_domain(sched
, param
);
1639 index
= isl_map_range(sched
);
1640 domain
= isl_set_copy(index
);
1641 pma
= isl_pw_multi_aff_from_set(index
);
1642 pma
= isl_pw_multi_aff_coalesce(pma
);
1643 domain
= isl_set_params(domain
);
1645 print_indent(code
->dst
, code
->indent
);
1647 print_local_index(code
->dst
, group
, bounds
, pma
, domain
);
1648 fprintf(code
->dst
, " = ");
1649 print_global_index(code
->dst
, group
->array
, pma
, domain
);
1651 print_global_index(code
->dst
, group
->array
, pma
, domain
);
1652 fprintf(code
->dst
, " = ");
1653 print_local_index(code
->dst
, group
, bounds
, pma
, domain
);
1655 fprintf(code
->dst
, ";\n");
1657 isl_pw_multi_aff_free(pma
);
1658 isl_set_free(domain
);
1661 static void print_shared_access(struct cuda_gen
*gen
,
1662 __isl_keep isl_set
*shared_domain
, __isl_take isl_set
*access
,
1663 const char *type
, struct cuda_array_ref_group
*group
)
1665 const char *array_name
;
1668 isl_union_map
*sched
;
1669 unsigned nvar
= isl_set_dim(access
, isl_dim_set
);
1672 ctx
= isl_set_get_ctx(access
);
1673 array_name
= isl_set_get_tuple_name(access
);
1674 name
= isl_alloc_array(ctx
, char,
1675 strlen(type
) + sizeof("_shared_") + strlen(array_name
) + 20);
1676 if (group
->array
->n_group
> 1)
1677 sprintf(name
, "%s_shared_%s_%d", type
, array_name
, group
->nr
);
1679 sprintf(name
, "%s_shared_%s", type
, array_name
);
1680 access
= isl_set_set_tuple_name(access
, name
);
1683 sched
= access_schedule(gen
, access
, group
);
1685 n_tile
= gen
->n_block
;
1689 gen
->copy_sched
= isl_map_from_union_map(isl_union_map_copy(sched
));
1690 gen
->copy_group
= group
;
1691 gen
->copy_bound
= group
->shared_bound
;
1693 print_shared_body(gen
, shared_domain
, sched
, nvar
+ n_tile
,
1694 &print_copy_statement
, -1);
1696 isl_union_map_free(sched
);
1697 isl_map_free(gen
->copy_sched
);
/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
1703 static __isl_give isl_union_map
*group_access_relation(
1704 struct cuda_array_ref_group
*group
, int read
, int write
)
1707 isl_union_map
*access
;
1709 access
= isl_union_map_empty(isl_map_get_space(group
->access
));
1710 for (i
= 0; i
< group
->n_ref
; ++i
) {
1713 if (!((read
&& group
->refs
[i
]->read
) ||
1714 (write
&& group
->refs
[i
]->write
)))
1716 map_i
= isl_map_copy(group
->refs
[i
]->access
);
1717 access
= isl_union_map_union(access
,
1718 isl_union_map_from_map(map_i
));
/* Check that none of the shared memory tiles involve any strides.
 */
1726 static int no_strides(struct cuda_array_ref_group
*group
)
1729 int n_index
= group
->array
->n_index
;
1731 for (i
= 0; i
< n_index
; ++i
)
1732 if (group
->shared_bound
[i
].shift
)
/* Return a set containing the values of the given index i
 * of the elements in the array tile in global memory that corresponds
 * to the shared memory copy.
 * In particular, if a is the index, we return a set with constraints
 *
 *	tile_offset <= a <= tile_offset + tile_size - 1
 *
 * and
 *
 *	0 <= a <= array_size - 1
 */
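/* For example (values picked only to illustrate): with tile_offset = 32 * b0,
 * tile_size = 32 and array_size = n, the returned set is
 *
 *	{ [a] : 32 * b0 <= a <= 32 * b0 + 31 and 0 <= a <= n - 1 }
 */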
1750 static __isl_give isl_set
*group_tile_dim(struct cuda_array_ref_group
*group
,
1753 isl_basic_set
*tile
;
1756 isl_local_space
*ls
;
1761 aff
= isl_aff_copy(group
->shared_bound
[i
].lb
);
1762 aff
= isl_aff_add_dims(aff
, isl_dim_in
, 1);
1763 ls
= isl_aff_get_domain_local_space(aff
);
1764 aff
= isl_aff_neg(aff
);
1765 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, 0, 1);
1766 c
= isl_inequality_from_aff(isl_aff_copy(aff
));
1767 tile
= isl_basic_set_from_constraint(c
);
1769 aff
= isl_aff_neg(aff
);
1770 aff
= isl_aff_add_constant(aff
, group
->shared_bound
[i
].size
);
1771 aff
= isl_aff_add_constant_si(aff
, -1);
1772 c
= isl_inequality_from_aff(aff
);
1773 tile
= isl_basic_set_add_constraint(tile
, c
);
1775 aff
= isl_aff_zero_on_domain(ls
);
1776 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, 0, 1);
1777 c
= isl_inequality_from_aff(aff
);
1778 tile
= isl_basic_set_add_constraint(tile
, c
);
1780 bound
= isl_pw_aff_copy(group
->array
->bound
[i
]);
1781 bound
= isl_pw_aff_add_dims(bound
, isl_dim_in
, 1);
1782 ls
= isl_local_space_from_space(isl_pw_aff_get_domain_space(bound
));
1783 aff
= isl_aff_zero_on_domain(ls
);
1784 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, 0, 1);
1785 aff
= isl_aff_add_constant_si(aff
, 1);
1786 dom
= isl_pw_aff_domain(isl_pw_aff_copy(bound
));
1788 tile_set
= isl_pw_aff_ge_set(bound
, isl_pw_aff_alloc(dom
, aff
));
1789 tile_set
= isl_set_align_params(tile_set
, isl_basic_set_get_space(tile
));
1790 tile_set
= isl_set_intersect(tile_set
, isl_set_from_basic_set(tile
));
/* Return a set containing the elements in the array tile in
 * global memory that corresponds to the shared memory copy.
 */
1798 static __isl_give isl_set
*group_tile(struct cuda_array_ref_group
*group
)
1801 int n_index
= group
->array
->n_index
;
1804 tile
= group_tile_dim(group
, 0);
1805 for (i
= 1; i
< n_index
; ++i
) {
1808 tile_i
= group_tile_dim(group
, i
);
1809 tile
= isl_set_flat_product(tile
, tile_i
);
1812 tile
= isl_set_set_tuple_name(tile
, group
->array
->name
);
/* Print code for reading into or writing from shared memory
 * the given array reference group.
 *
 * sched maps the original iteration domains to the shared memory tile loops.
 *
 * If we are performing a read from global memory to shared memory,
 * if the array involved is not a scalar and if the definition of the
 * shared memory tiles does not involve any strides, then we copy
 * the entire tile to shared memory. This may result in some extra
 * elements getting copied, but it should lead to simpler code
 * (which means that fewer registers may be needed) and less divergence.
 *
 * Otherwise, we only copy the elements that will be read or have been written
 * in the kernel.
 *
 * Note that the absence of stride requirement can easily be lifted.
 * We would just need to add constraints of the form
 *
 *	shift + a = stride * alpha
 */
1837 static int print_group_shared_accesses(struct cuda_gen
*gen
,
1838 struct cuda_array_ref_group
*group
, const char *type
,
1839 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*sched
)
1842 isl_union_map
*access
;
1843 isl_union_set
*uset
;
1844 isl_set
*access_set
;
1846 if (group
->private_bound
)
1848 if (!group
->shared_bound
)
1851 read
= !strcmp(type
, "read");
1853 access
= group_access_relation(group
, read
, !read
);
1854 access
= isl_union_map_apply_domain(access
, isl_union_map_copy(sched
));
1855 uset
= isl_union_map_range(access
);
1857 if (isl_union_set_is_empty(uset
)) {
1858 isl_union_set_free(uset
);
1862 if (read
&& group
->array
->n_index
> 0 && no_strides(group
)) {
1863 isl_union_set_free(uset
);
1864 access_set
= group_tile(group
);
1865 print_shared_access(gen
, shared_domain
, access_set
,
1870 access_set
= isl_set_from_union_set(uset
);
1871 access_set
= isl_set_coalesce(access_set
);
1873 print_shared_access(gen
, shared_domain
, access_set
, type
, group
);
/* Print code for reading into or writing from shared memory at
 * the given level (-1 for innermost).
 *
 * If we are not printing at the innermost level, then the dimensionality
 * of shared_domain may be smaller than gen->shared_len.
 * As the rest of the code assumes that the domain of access has
 * gen->shared_len dimensions, we therefore may need to embed this domain
 * in a higher dimensional space after intersection with shared_domain.
 */
1887 static void print_shared_accesses(struct cuda_gen
*gen
,
1888 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*access
,
1889 const char *type
, int level
)
1895 int shared_len
= isl_set_dim(shared_domain
, isl_dim_set
);
1897 isl_union_map
*sched
;
1899 shared_domain
= isl_set_copy(shared_domain
);
1900 sched
= isl_union_map_copy(gen
->tiled_sched
);
1901 dim
= isl_union_map_get_space(sched
);
1902 proj
= projection(dim
, gen
->tiled_len
, shared_len
);
1903 sched
= isl_union_map_apply_range(sched
, isl_union_map_from_map(proj
));
1904 sched
= isl_union_map_intersect_range(sched
,
1905 isl_union_set_from_set(isl_set_copy(shared_domain
)));
1906 if (shared_len
!= gen
->shared_len
) {
1907 dim
= isl_union_map_get_space(sched
);
1908 proj
= projection(dim
, gen
->shared_len
, shared_len
);
1909 proj
= isl_map_reverse(proj
);
1910 shared_domain
= isl_set_apply(shared_domain
,
1911 isl_map_copy(proj
));
1912 sched
= isl_union_map_apply_range(sched
,
1913 isl_union_map_from_map(proj
));
1916 dim
= isl_union_map_get_space(sched
);
1917 par
= parametrization(dim
, gen
->shared_len
, 0, gen
->shared_len
, "g");
1918 sched
= isl_union_map_intersect_range(sched
,
1919 isl_union_set_from_set(par
));
1921 for (i
= 0; i
< gen
->n_array
; ++i
) {
1922 struct cuda_array_info
*array
= &gen
->array
[i
];
1924 if (gen
->array
[i
].print_shared_level
!= level
)
1927 for (j
= 0; j
< array
->n_group
; ++j
) {
1928 if (print_group_shared_accesses(gen
, array
->groups
[j
],
1929 type
, shared_domain
, sched
))
1934 isl_union_map_free(sched
);
1935 isl_set_free(shared_domain
);
1938 print_indent(gen
->cuda
.kernel_c
, gen
->kernel_code
.indent
);
1939 fprintf(gen
->cuda
.kernel_c
, "__syncthreads();\n");
/* This function is called for each access to an array in some statement
 * in the original code.
 * Replace that access by an access to shared or (linearized) global memory.
 * Since the array in shared memory is just
 * a shifted copy of part of the original array, we simply need
 * to subtract the lower bound, which was computed
 * in can_tile_for_shared_memory.
 * If any of the indices is strided, then we first add
 * shared_bound[i].shift and divide by shared_bound[i].stride.
 *
 * If the given array is accessed directly from global memory,
 * we don't need to perform any shifting and simply simplify
 * the expression in the context of the domain instead.
 *
 * If the array space (range of access) has no name, then we are
 * accessing an iterator in the original program.
 */
1960 static void print_access(struct cuda_gen
*gen
, __isl_take isl_map
*access
,
1966 struct cuda_array_info
*array
= NULL
;
1968 isl_pw_multi_aff
*pma
;
1971 struct cuda_array_bound
*bounds
= NULL
;
1973 access
= isl_map_align_params(access
,
1974 isl_set_get_space(gen
->stmt_domain
));
1976 data_set
= isl_set_apply(isl_set_copy(gen
->stmt_domain
), access
);
1978 name
= isl_set_get_tuple_name(data_set
);
1981 fprintf(gen
->cuda
.kernel_c
, "(");
1983 struct cuda_array_ref_group
*group
;
1985 for (i
= 0; i
< gen
->n_array
; ++i
) {
1986 if (strcmp(name
, gen
->array
[i
].name
))
1988 array
= &gen
->array
[i
];
1991 group
= array
->groups
[group_nr
];
1992 bounds
= group
->private_bound
;
1994 bounds
= group
->shared_bound
;
1996 if (!bounds
&& cuda_array_is_scalar(array
) && !array
->read_only
)
1997 fprintf(gen
->cuda
.kernel_c
, "*");
1998 print_array_name(gen
->cuda
.kernel_c
, group
);
2000 if (cuda_array_is_scalar(array
)) {
2001 isl_set_free(data_set
);
2005 fprintf(gen
->cuda
.kernel_c
, "[");
2009 n_index
= isl_set_dim(data_set
, isl_dim_set
);
2010 pma
= isl_pw_multi_aff_from_set(data_set
);
2011 pma
= isl_pw_multi_aff_coalesce(pma
);
2013 prn
= isl_printer_to_file(gen
->ctx
, gen
->cuda
.kernel_c
);
2014 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
2017 for (i
= 0; i
+ 1 < n_index
; ++i
)
2018 prn
= isl_printer_print_str(prn
, "(");
2020 for (i
= 0; i
< n_index
; ++i
) {
2023 index
= isl_pw_multi_aff_get_pw_aff(pma
, i
);
2026 prn
= isl_printer_print_pw_aff(prn
, index
);
2027 isl_pw_aff_free(index
);
2031 domain
= isl_set_copy(gen
->stmt_domain
);
2032 domain
= isl_set_params(domain
);
2034 index
= isl_pw_aff_coalesce(index
);
2035 index
= isl_pw_aff_gist(index
, domain
);
2037 index
= shift_index(index
, array
, &bounds
[i
], domain
);
2041 prn
= isl_printer_print_str(prn
, ") * (");
2042 prn
= isl_printer_print_pw_aff(prn
,
2043 array
->local_bound
[i
]);
2044 prn
= isl_printer_print_str(prn
, ") + ");
2046 prn
= isl_printer_print_str(prn
, "][");
2048 prn
= isl_printer_print_pw_aff(prn
, index
);
2049 isl_pw_aff_free(index
);
2052 prn
= isl_printer_print_str(prn
, ")");
2054 prn
= isl_printer_print_str(prn
, "]");
2055 isl_printer_free(prn
);
2057 isl_pw_multi_aff_free(pma
);
static struct cuda_stmt_access *print_expr(struct cuda_gen *gen, FILE *out,
	struct pet_expr *expr, struct cuda_stmt_access *access, int outer)
	switch (expr->type) {
	case pet_expr_double:
		fprintf(out, "%g", expr->d);
	case pet_expr_access:
		print_access(gen, isl_map_copy(access->access), access->group);
		access = access->next;
	case pet_expr_unary:
		fprintf(out, " %s ", pet_op_str(expr->op));
		access = print_expr(gen, out, expr->args[pet_un_arg],
	case pet_expr_binary:
		access = print_expr(gen, out, expr->args[pet_bin_lhs],
		fprintf(out, " %s ", pet_op_str(expr->op));
		access = print_expr(gen, out, expr->args[pet_bin_rhs],
	case pet_expr_ternary:
		access = print_expr(gen, out, expr->args[pet_ter_cond],
		fprintf(out, " ? ");
		access = print_expr(gen, out, expr->args[pet_ter_true],
		fprintf(out, " : ");
		access = print_expr(gen, out, expr->args[pet_ter_false],
		fprintf(out, "%s(", expr->name);
		for (i = 0; i < expr->n_arg; ++i) {
			access = print_expr(gen, out, expr->args[i],

static void print_stmt_body(struct cuda_gen *gen,
	FILE *out, struct cuda_stmt *stmt)
	print_expr(gen, out, stmt->body, stmt->accesses, 1);
	fprintf(out, ";\n");
/* This function is called for each leaf in the innermost clast,
 * i.e., for each statement.
 * We print the statement body, simplifying the accesses based
 */
static void print_statement(struct gpucode_info *code,
	struct clast_user_stmt *u)
	struct cuda_gen *gen = code->user;
	isl_set *stmt_domain;
	isl_union_map *stmt_sched;
	isl_union_set *uset;
	struct cuda_stmt *stmt;

	nr = atoi(u->statement->name + 2);
	stmt = &gen->stmts[nr];

	stmt_domain = extract_host_domain(u);

	stmt_sched = isl_union_map_intersect_range(
		isl_union_map_copy(gen->local_sched),
		isl_union_set_from_set(extend(stmt_domain,
			gen->thread_tiled_len)));
	dim = isl_union_map_get_space(stmt_sched);
	par = parametrization(dim, gen->thread_tiled_len, 0,
		gen->thread_tiled_len, "c");
	stmt_sched = isl_union_map_intersect_range(stmt_sched,
		isl_union_set_from_set(par));

	uset = isl_union_map_domain(stmt_sched);
	dim = isl_union_set_get_space(uset);
	dim = isl_space_add_dims(dim, isl_dim_set,
		isl_set_dim(stmt->domain, isl_dim_set));
	dim = isl_space_set_tuple_name(dim, isl_dim_set, u->statement->name);
	gen->stmt_domain = isl_union_set_extract_set(uset, dim);
	isl_union_set_free(uset);

	print_indent(code->dst, code->indent);
	print_stmt_body(gen, code->dst, stmt);

	isl_set_free(gen->stmt_domain);
static void print_private_access(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_take isl_set *access,
	const char *type, struct cuda_array_ref_group *group)
	const char *array_name;
	unsigned nvar = isl_set_dim(access, isl_dim_set);
	isl_union_map *usched;

	if (isl_set_fast_is_empty(access)) {
		isl_set_free(access);

	ctx = isl_set_get_ctx(access);
	array_name = isl_set_get_tuple_name(access);
	name = isl_alloc_array(ctx, char,
		strlen(type) + sizeof("_private_") + strlen(array_name) + 20);
	if (group->array->n_group > 1)
		sprintf(name, "%s_private_%s_%d", type, array_name, group->nr);
		sprintf(name, "%s_private_%s", type, array_name);
	access = isl_set_set_tuple_name(access, name);

	gen->copy_sched = shift_access(access, group);
	gen->copy_group = group;
	gen->copy_bound = group->private_bound;

	usched = isl_union_map_from_map(isl_map_copy(gen->copy_sched));
	print_shared_body(gen, shared_domain, usched, nvar,
		&print_copy_statement, 1);
	isl_union_map_free(usched);

	isl_map_free(gen->copy_sched);
/* Print code for reading into or writing from private memory
 * the given array reference group.
 *
 * sched maps the original iteration domains to the shared memory tile loops.
 */
static void print_group_private_accesses(struct cuda_gen *gen,
	struct cuda_array_ref_group *group,
	const char *type, __isl_keep isl_set *shared_domain,
	unsigned first_shared, int shared_len, __isl_keep isl_union_map *sched)
	isl_union_map *access;
	isl_union_set *uset;
	isl_set *access_set;

	if (!group->private_bound)

	read = !strcmp(type, "read");

	access = group_access_relation(group, read, !read);
	access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
	access = isl_union_map_intersect(access,
		isl_union_map_copy(gen->private_access));
	uset = isl_union_map_range(access);

	if (isl_union_set_is_empty(uset)) {
		isl_union_set_free(uset);

	access_set = isl_set_from_union_set(uset);
	access_set = isl_set_coalesce(access_set);
	access_set = isl_set_eliminate(access_set, isl_dim_param,
		first_shared + shared_len,
		gen->shared_len - shared_len);

	print_private_access(gen, shared_domain, access_set, type, group);
/* Print code for reading into or writing from private memory at
 * the given level (-1 for innermost).
 *
 * If we are not printing at the innermost level, then the dimensionality
 * of shared_domain may be smaller than gen->shared_len.
 * As the rest of the code assumes that the domain of access has
 * gen->shared_len dimensions, we therefore may need to embed this domain
 * in a higher dimensional space after intersection with shared_domain.
 *
 * This code is very similar to print_shared_accesses.
 * The main difference is that we have to take into account gen->private_access.
 */
static void print_private_accesses(struct cuda_gen *gen,
	__isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
	const char *type, int level)
	int shared_len = isl_set_dim(shared_domain, isl_dim_set);
	unsigned first_shared;
	isl_union_map *sched;

	shared_domain = isl_set_copy(shared_domain);
	sched = isl_union_map_copy(gen->tiled_sched);
	dim = isl_union_map_get_space(sched);
	first_shared = isl_space_dim(dim, isl_dim_param);
	proj = projection(dim, gen->tiled_len, shared_len);
	sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
	sched = isl_union_map_intersect_range(sched,
		isl_union_set_from_set(isl_set_copy(shared_domain)));
	if (shared_len != gen->shared_len) {
		dim = isl_union_map_get_space(sched);
		proj = projection(dim, gen->shared_len, shared_len);
		proj = isl_map_reverse(proj);
		shared_domain = isl_set_apply(shared_domain,
			isl_map_copy(proj));
		sched = isl_union_map_apply_range(sched,
			isl_union_map_from_map(proj));

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		if (gen->array[i].print_shared_level != level)

		for (j = 0; j < array->n_group; ++j)
			print_group_private_accesses(gen, array->groups[j],
				type, shared_domain,
				first_shared, shared_len, sched);

	isl_union_map_free(sched);
	isl_set_free(shared_domain);
/* Set unroll[j] if the input dimension j is involved in
 * the index expression represented by bmap.
 */
static int check_unroll(__isl_take isl_basic_map *bmap, void *user)
	int n_in = isl_basic_map_dim(bmap, isl_dim_in);
	int n_out = isl_basic_map_dim(bmap, isl_dim_out);

	for (i = 0; i < n_out; ++i) {
		ok = isl_basic_map_has_defining_equality(bmap,
			isl_dim_out, i, &c);
		for (j = 0; j < n_in; ++j)
			if (isl_constraint_involves_dims(c, isl_dim_in, j, 1))
		isl_constraint_free(c);

	isl_basic_map_free(bmap);
/* Given an array pos mapping input dimensions to the corresponding
 * output dimension, construct the corresponding map.
 */
static __isl_give isl_map *permutation(__isl_take isl_space *dim,
	isl_basic_map *bmap;
	isl_local_space *ls;

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i], 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	isl_local_space_free(ls);

	return isl_map_from_basic_map(bmap);
/* Find all loops involved in any of the index expressions for any of
 * the private accesses, move them innermost and then mark them as
 * requiring unrolling by setting gen->first_unroll.
 * The loops involved should all be parallel because of the checks
 * we performed in check_private_group_access.  Moving them innermost
 * is therefore a valid transformation.
 */
static __isl_give isl_union_map *interchange_for_unroll(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
	int unroll[gen->thread_tiled_len];
	int perm[gen->thread_tiled_len];
	int len = gen->shared_len + gen->n_parallel + gen->n_block;

	gen->first_unroll = -1;

	for (i = 0; i < gen->thread_tiled_len; ++i)
	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		for (j = 0; j < array->n_group; ++j) {
			isl_union_map *access;

			if (!array->groups[j]->private_bound)

			access = group_access_relation(array->groups[j], 1, 1);
			access = isl_union_map_apply_domain(access,
				isl_union_map_copy(sched));
			acc = isl_map_from_union_map(access);
			isl_map_foreach_basic_map(acc, &check_unroll, unroll);

	for (i = 0; i < gen->shared_len; ++i)
	for (i = gen->shared_len; i < len; ++i)
	for (i = len; i < gen->thread_tiled_len; ++i)
	for (i = 0; i < gen->thread_tiled_len; ++i)
		gen->first_unroll = 1 + j;
	for (i = 0; i < len; ++i)

	dim = isl_union_map_get_space(sched);
	permute = permutation(dim, perm, gen->thread_tiled_len);
	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(permute));
/* This function is called for each leaf in the clast of the kernel code.
 * We first specialize the schedule to the site of the leaf and
 * print code for reading into shared memory, performing the actual
 * computations and writing from shared memory, with the required
 */
static void print_kernel_user(struct gpucode_info *code,
	struct clast_user_stmt *u)
	struct cuda_gen *gen = code->user;
	isl_set *shared_domain;

	shared_domain = extract_entire_host_domain(u);

	print_shared_accesses(gen, shared_domain, gen->read, "read", -1);

	print_private_accesses(gen, shared_domain, gen->read, "read", -1);

	print_shared_body(gen, shared_domain, gen->local_sched,
		gen->thread_tiled_len, &print_statement,

	print_private_accesses(gen, shared_domain, gen->write, "write", -1);

	print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
	fprintf(gen->cuda.kernel_c, "__syncthreads();\n");

	print_shared_accesses(gen, shared_domain, gen->write, "write", -1);

	isl_set_free(shared_domain);
/* Check if we need to perform any copying to shared memory at this level
 * and if so, print the copying instructions.
 * Any array for which we are allowed to print copying instructions at
 * this level, but haven't done so already, is printed.
 */
static void copy_to_local(struct cuda_gen *gen, __isl_keep isl_set *domain)
	level = isl_set_dim(domain, isl_dim_set);

	for (i = 0; i < gen->n_array; ++i) {
		if (gen->array[i].print_shared_level >= 0)
		if (gen->array[i].last_shared >= level)
		gen->array[i].print_shared_level = level;

	print_shared_accesses(gen, domain, gen->read, "read", level);
	print_private_accesses(gen, domain, gen->read, "read", level);
/* This function is called for each for loop in the clast,
 * right after the opening brace has been printed.
 *
 * Print copying instructions to shared or private memory if needed.
 */
static void print_kernel_for_head(struct gpucode_info *code,
	struct clast_for *f)
	struct cuda_gen *gen = code->user;

	domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
	copy_to_local(gen, domain);

	isl_set_free(domain);
/* Print instructions for copying from shared memory for each array
 * for which print_kernel_for_head has added copying instructions
 */
static void copy_from_local(struct cuda_gen *gen, __isl_keep isl_set *domain)
	level = isl_set_dim(domain, isl_dim_set);

	for (i = 0; i < gen->n_array; ++i) {
		if (gen->array[i].print_shared_level != level)

	print_private_accesses(gen, domain, gen->write, "write", level);
	print_shared_accesses(gen, domain, gen->write, "write", level);
/* This function is called for each for loop in the clast,
 * right before the closing brace is printed.
 *
 * Print copying instructions from shared or private memory if needed.
 */
static void print_kernel_for_foot(struct gpucode_info *code,
	struct clast_for *f)
	struct cuda_gen *gen = code->user;

	domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
	copy_from_local(gen, domain);

	isl_set_free(domain);
/* Use CLooG to generate code for the outer gen->shared_len loops
 * of the local schedule "sched".
 * The pretty printing of this code is handled by gpu_print_host_stmt,
 * which calls print_kernel_user for each iteration of the shared tile loops.
 */
static void print_cloog_kernel_body(struct cuda_gen *gen,
	__isl_keep isl_set *context, __isl_keep isl_union_map *sched)
	CloogOptions *options;
	CloogDomain *cloog_context;
	CloogUnionDomain *ud;
	struct clast_stmt *stmt;

	sched = isl_union_map_copy(sched);
	sched = isl_union_map_align_params(sched, isl_set_get_space(context));

	options = cloog_options_malloc(gen->state);
	options->language = CLOOG_LANGUAGE_C;
	options->strides = 1;
	options->stop = gen->shared_len;
	options->f = gen->tiled_len;
	options->l = gen->tiled_len;
	options->save_domains = 1;
	options->noscalars = 1;

	ud = cloog_union_domain_from_isl_union_map(sched);
	for (i = 0; i < gen->shared_len; ++i) {
		snprintf(name, sizeof(name), "g%d", i);
		ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
	cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
	input = cloog_input_alloc(cloog_context, ud);

	stmt = cloog_clast_create_from_input(input, options);

	gen->kernel_code.indent = 4;
	gen->kernel_code.dst = gen->cuda.kernel_c;
	gen->kernel_code.print_user_stmt = NULL;
	gen->kernel_code.print_user_stmt_list = &print_kernel_user;
	gen->kernel_code.print_for_head = &print_kernel_for_head;
	gen->kernel_code.print_for_foot = &print_kernel_for_foot;
	gen->kernel_code.user = gen;
	copy_to_local(gen, context);
	gpu_print_host_stmt(&gen->kernel_code, stmt);
	copy_from_local(gen, context);

	cloog_clast_free(stmt);
	cloog_options_free(options);
static void print_kernel_iterators(struct cuda_gen *gen)
	const char *block_dims[] = { "blockIdx.x", "blockIdx.y" };
	const char *thread_dims[] = { "threadIdx.x", "threadIdx.y",

	if (gen->n_grid > 0) {
		print_indent(gen->cuda.kernel_c, 4);
		fprintf(gen->cuda.kernel_c, "int ");
		for (i = 0; i < gen->n_grid; ++i) {
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_c, "b%d = %s",
				i, block_dims[gen->n_grid - 1 - i]);
		fprintf(gen->cuda.kernel_c, ";\n");

	if (gen->n_block > 0) {
		print_indent(gen->cuda.kernel_c, 4);
		fprintf(gen->cuda.kernel_c, "int ");
		for (i = 0; i < gen->n_block; ++i) {
			fprintf(gen->cuda.kernel_c, ", ");
			fprintf(gen->cuda.kernel_c, "t%d = %s",
				i, thread_dims[gen->n_block - 1 - i]);
		fprintf(gen->cuda.kernel_c, ";\n");
static void print_group_shared_array(struct cuda_gen *gen,
	struct cuda_array_ref_group *group)
	struct cuda_array_bound *bounds;

	bounds = group->private_bound;
	bounds = group->shared_bound;

	print_indent(gen->cuda.kernel_c, 4);
	fprintf(gen->cuda.kernel_c, "%s%s ",
		group->private_bound ? "" : "__shared__ ", group->array->type);
	print_array_name(gen->cuda.kernel_c, group);
	for (j = 0; j < group->array->n_index; ++j) {
		fprintf(gen->cuda.kernel_c, "[");
		isl_int_print(gen->cuda.kernel_c, bounds[j].size, 0);
		fprintf(gen->cuda.kernel_c, "]");
	fprintf(gen->cuda.kernel_c, ";\n");

static void print_shared_arrays(struct cuda_gen *gen)
	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		for (j = 0; j < array->n_group; ++j)
			print_group_shared_array(gen, array->groups[j]);
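/* A group mapped to shared memory results in a declaration such as
 * (assuming a hypothetical float array A and a 32x32 tile)
 *
 *	__shared__ float shared_A[32][32];
 *
 * while a privatized group drops the __shared__ qualifier and uses the
 * private_ prefix produced by print_array_name instead.
 */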
static void print_kernel_body(struct cuda_gen *gen,
	__isl_keep isl_set *host_domain, __isl_keep isl_union_map *sched)
	context = isl_set_copy(host_domain);
	context = parametrize(context, 0, gen->tile_first, "h");
	context = isl_set_project_out(context, isl_dim_set, 0, gen->tile_first);
	context = add_bounded_parameters(context,
		gen->n_grid, gen->grid_dim, "b");

	print_kernel_iterators(gen);
	print_shared_arrays(gen);

	fprintf(gen->cuda.kernel_c, "\n");

	print_cloog_kernel_body(gen, context, sched);

	isl_set_free(context);
/* Given a constraint
 *
 *	a(p,i) + j = g f(e)
 *
 * or -a(p,i) - j = g f(e) if sign < 0,
 * store a(p,i) in bound->shift and g (stride) in bound->stride.
 * a(p,i) is assumed to be an expression in only the parameters.
 */
static void extract_stride(__isl_keep isl_constraint *c,
	struct cuda_array_bound *bound, isl_int stride, int sign)
	isl_int_set(bound->stride, stride);

	dim = isl_constraint_get_space(c);
	dim = isl_space_params(dim);

	nparam = isl_space_dim(dim, isl_dim_param);

	isl_constraint_get_constant(c, &v);

	aff = isl_aff_zero_on_domain(isl_local_space_from_space(dim));
	aff = isl_aff_set_constant(aff, v);

	for (i = 0; i < nparam; ++i) {
		isl_constraint_get_coefficient(c, isl_dim_param, i, &v);
		if (isl_int_is_zero(v))
		aff = isl_aff_add_coefficient(aff, isl_dim_param, i, v);
/* Given an equality constraint of a map with a single output dimension j,
 * check if the constraint is of the form
 *
 *	a(p,i) + j = g f(e)
 *
 * with a(p,i) an expression in the parameters and input dimensions
 * and f(e) an expression in the existentially quantified variables.
 * If so, and if g is larger than any such g from a previously considered
 * constraint, then call extract_stride to record the stride information.
 */
static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
	struct cuda_array_bound *bound = user;

	isl_int_init(stride);

	n_div = isl_constraint_dim(c, isl_dim_div);
	isl_constraint_get_coefficient(c, isl_dim_out, 0, &v);

	if (n_div && (isl_int_is_one(v) || isl_int_is_negone(v))) {
		int s = isl_int_sgn(v);
		isl_int_set_si(stride, 0);
		for (i = 0; i < n_div; ++i) {
			isl_constraint_get_coefficient(c, isl_dim_div, i, &v);
			isl_int_gcd(stride, stride, v);
		if (!isl_int_is_zero(stride) &&
			isl_int_gt(stride, bound->stride))
			extract_stride(c, bound, stride, s);

	isl_int_clear(stride);

	isl_constraint_free(c);
/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 */
static __isl_give isl_basic_map *check_stride(struct cuda_gen *gen,
	struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
	isl_basic_map *shift;

	isl_int_set_si(bound->stride, -1);

	aff = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));

	isl_basic_map_foreach_constraint(aff, &check_stride_constraint, bound);

	isl_basic_map_free(aff);

	if (isl_int_is_neg(bound->stride))

	aff_shift = isl_aff_copy(bound->shift);
	aff_shift = isl_aff_add_dims(aff_shift, isl_dim_in, 1);
	aff_shift = isl_aff_add_coefficient_si(aff_shift, isl_dim_in, 0, 1);
	aff_shift = isl_aff_scale_down(aff_shift, bound->stride);
	shift = isl_basic_map_from_aff(aff_shift);

	bound->shift_map = isl_basic_map_copy(shift);
	bounds = isl_basic_map_apply_range(bounds, shift);
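/* As an example, if the accessed index always satisfies i = 4e + 1 for some
 * integer e (so i + 3 is a multiple of 4), this records shift = 3 and
 * stride = 4 and rewrites the index as (i + 3)/4, so that the resulting
 * tile of the array is dense.
 */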
struct cuda_size_info {
	isl_basic_set *bset;
	struct cuda_array_bound *bound;
/* Given a constraint from the basic set describing the bounds on
 * an array index, check if it is a lower bound, say m i >= b(x), and,
 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
 * upper bound.  If so, and if this bound is smaller than any bound
 * derived from earlier constraints, set the size to this bound on
 * the expression and the lower bound to ceil(b(x)/m).
 */
static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
	struct cuda_size_info *size = user;

	nparam = isl_basic_set_dim(size->bset, isl_dim_param);
	n_div = isl_constraint_dim(c, isl_dim_div);

	if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div)) {
		isl_constraint_free(c);

	isl_constraint_get_coefficient(c, isl_dim_set, size->pos, &v);

	if (isl_int_is_pos(v)) {
		enum isl_lp_result res;

		aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
		aff = isl_aff_ceil(aff);

		lb = isl_aff_copy(aff);

		aff = isl_aff_neg(aff);
		aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);

		res = isl_basic_set_max(size->bset, aff, &v);

		if (res == isl_lp_ok) {
			isl_int_add_ui(v, v, 1);
			if (isl_int_is_neg(size->bound->size) ||
				isl_int_lt(v, size->bound->size)) {
				isl_int_set(size->bound->size, v);
				lb = isl_aff_drop_dims(lb, isl_dim_in,
				isl_aff_free(size->bound->lb);
				size->bound->lb = isl_aff_copy(lb);

	isl_constraint_free(c);
/* Given a basic map "bounds" that maps parameters and input dimensions
 * to a single output dimension, look for an expression in the parameters
 * and input dimensions such that the range of the output dimension shifted
 * by this expression is a constant.
 *
 * In particular, we currently only consider lower bounds on the output
 * dimension as candidate expressions.
 */
static int compute_array_dim_size(struct cuda_gen *gen,
	struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
	struct cuda_size_info size;

	bounds = isl_basic_map_detect_equalities(bounds);
	bounds = check_stride(gen, bound, bounds);

	isl_int_set_si(bound->size, -1);

	size.pos = isl_basic_map_dim(bounds, isl_dim_in);
	size.bset = isl_basic_map_wrap(bounds);
	size.bset = isl_basic_set_flatten(size.bset);
	size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
	isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
	isl_basic_set_free(size.bset);

	return isl_int_is_nonneg(bound->size) ? 0 : -1;
/* Check if we can find a shared memory tile for the given array
 * based on the given accesses, and if so, put the results
 * in array->shared_bound.
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant.
 */
static int can_tile_for_shared_memory(struct cuda_gen *gen,
	struct cuda_array_info *array, __isl_keep isl_map *access,
	struct cuda_array_bound *bounds)
	for (i = 0; i < array->n_index; ++i) {
		isl_basic_map *hull;

		access_i = isl_map_copy(access);
		access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
		access_i = isl_map_project_out(access_i, isl_dim_out,
			1, array->n_index - (i + 1));
		access_i = isl_map_compute_divs(access_i);
		hull = isl_map_simple_hull(access_i);
		if (compute_array_dim_size(gen, &bounds[i], hull) < 0)
/* Construct a map with input the shared tile loops and the loops that
 * will be wrapped around the threads that relates these latter loops
 * to the thread indices and then projects them out.
 */
static __isl_give isl_map *compute_privatization(struct cuda_gen *gen)
	dim = isl_union_map_get_space(gen->shared_sched);

	if (gen->options->wrap)
		tiling = wrap(isl_space_copy(dim), gen->shared_len + gen->n_block,
			gen->shared_len, gen->n_block, gen->block_dim);
		tiling = tile(isl_space_copy(dim), gen->shared_len + gen->n_block,
			gen->shared_len, gen->n_block, gen->block_dim);

	par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
		gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
	priv = isl_map_align_params(priv, isl_set_get_space(par));
	priv = isl_map_intersect_range(priv, par);

	dim = isl_map_get_space(priv);
	dim = isl_space_drop_dims(dim, isl_dim_in, 0, isl_space_dim(dim, isl_dim_in));
	dim = isl_space_drop_dims(dim, isl_dim_out, 0, isl_space_dim(dim, isl_dim_out));
	proj = projection(dim, gen->shared_len + 2 * gen->n_block,
	priv = isl_map_apply_range(priv, proj);
/* Construct a map from domain_dim to domain_dim that increments
 * the dimension at position "pos" and leaves all other dimensions
 * unchanged.
 */
static __isl_give isl_map *next(__isl_take isl_space *domain_dim, int pos)
	int len = isl_space_dim(domain_dim, isl_dim_set);
	isl_basic_map *next;
	isl_local_space *ls;

	dim = isl_space_map_from_set(domain_dim);
	next = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		isl_constraint_set_constant_si(c, 1);
		next = isl_basic_map_add_constraint(next, c);

	isl_local_space_free(ls);

	return isl_map_from_basic_map(next);
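/* For a 2-dimensional space and pos == 1, the resulting map is essentially
 *
 *	{ [i0, i1] -> [i0, i1 + 1] }
 *
 * i.e., only the dimension at position "pos" is incremented.
 */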
/* Check if the given access is coalesced.
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * This function is only called for access relations without reuse.
 */
static int access_is_coalesced(struct cuda_gen *gen,
	__isl_keep isl_union_map *access)
	isl_map *access_map;
	isl_map *next_thread_x;
	isl_map *next_element;

	access = isl_union_map_copy(access);
	access = isl_union_map_apply_domain(access,
		isl_union_map_copy(gen->tiled_sched));
	access_map = isl_map_from_union_map(access);

	dim = isl_map_get_space(access_map);
	dim = isl_space_domain(dim);
	next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);

	dim = isl_map_get_space(access_map);
	dim = isl_space_range(dim);
	next_element = next(dim, isl_space_dim(dim, isl_dim_set) - 1);

	map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
	map = isl_map_apply_range(map, access_map);

	coalesced = isl_map_is_subset(map, next_element);

	isl_map_free(next_element);
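/* Intuitively, for a row-major array, an access like A[i][t] (with t the
 * dimension wrapped over threadIdx.x) is coalesced: incrementing t
 * increments the last array index, so the subset test above succeeds.
 * An access like A[t][i], on the other hand, jumps by a whole row per
 * thread and the test fails.
 */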
/* For the given array reference group, check whether the access is private
 * to the thread.  That is, check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation that maps the shared tile loop iterators
 * and the shared point loop iterators that will be wrapped over the
 * threads to the array elements.
 * We actually check that those iterators that will be wrapped
 * partition the array space.  This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * We also check that the index expression only depends on parallel
 * loops.  That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If it turns out we can use registers, we compute the private memory
 * tile size using can_tile_for_shared_memory, after introducing a dependence
 * on the thread indices.
 *
 * Before performing any of the above computations, we first check
 * if there is any reuse on the reference group.  If not, we simply
 * return.  If, moreover, the access is coalesced then we also remove
 * the shared memory tiling since we should just use global memory instead.
 */
static void check_private_group_access(struct cuda_gen *gen,
	struct cuda_array_ref_group *group)
	isl_union_map *access;
	int n_index = group->array->n_index;

	access = group_access_relation(group, 1, 1);
	if (isl_union_map_is_injective(access)) {
		if (group->shared_bound && access_is_coalesced(gen, access)) {
			free_bound_list(group->shared_bound, n_index);
			group->shared_bound = NULL;
		isl_union_map_free(access);

	access = isl_union_map_apply_domain(access,
		isl_union_map_copy(gen->shared_sched));

	acc = isl_map_from_union_map(access);

	if (!isl_map_is_bijective(acc)) {

	group->private_bound = create_bound_list(gen->ctx, n_index);
	acc = isl_map_align_params(acc, isl_map_get_space(gen->privatization));
	acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
	if (!can_tile_for_shared_memory(gen, group->array, acc,
		group->private_bound)) {
		free_bound_list(group->private_bound, n_index);
		group->private_bound = NULL;
/* Look for the last shared tile loop that affects the offset of the
 * shared or private tile and store the result in array->last_shared.
 */
static void set_last_shared(struct cuda_gen *gen,
	struct cuda_array_ref_group *group)
	struct cuda_array_bound *bounds;
	unsigned first_shared = gen->first_shared;
	int n_index = group->array->n_index;

	bounds = group->private_bound;
	bounds = group->shared_bound;

	for (j = gen->shared_len - 1; j >= 0; --j) {
		for (i = 0; i < n_index; ++i) {
			if (isl_aff_involves_dims(lb, isl_dim_param,
					first_shared + j, 1))

			shift = bounds[i].shift;

			if (isl_aff_involves_dims(shift, isl_dim_param,
					first_shared + j, 1))

	group->array->last_shared = j;
/* Compute the sizes of all private arrays for the current kernel,
 * as well as the offsets of the private pieces in the original arrays.
 * If we cannot or don't want to privatize a given array group,
 * we use the shared memory tile sizes computed in
 * compute_group_shared_bound instead.
 *
 * If a given array only has a single reference group and if we have
 * been able to find a private or shared tile,
 * we also look for the last shared tile loop that affects the offset
 * (and therefore the array tile) and store the result in array->last_shared.
 *
 * A privatized copy of all access relations from reference groups that
 * are mapped to private memory is stored in gen->private_access.
 */
static void compute_private_size(struct cuda_gen *gen)
	isl_union_map *private;

	if (!gen->options->use_private_memory)

	private = isl_union_map_empty(isl_union_map_get_space(gen->shared_sched));

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		for (j = 0; j < array->n_group; ++j) {
			check_private_group_access(gen, array->groups[j]);

			if (!array->groups[j]->private_bound)

			private = isl_union_map_union(private,
				group_access_relation(array->groups[j], 1, 1));

		array->last_shared = gen->shared_len - 1;
		array->print_shared_level = -1;

		if (array->n_group != 1)
		set_last_shared(gen, array->groups[0]);

	if (isl_union_map_is_empty(private))
		isl_union_map_free(private);
		isl_union_map *priv;

		private = isl_union_map_apply_domain(private,
			isl_union_map_copy(gen->shared_sched));
		priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
		private = isl_union_map_apply_domain(private, priv);
		gen->private_access = private;
/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel.
 */
static int populate_array_references(struct cuda_gen *gen,
	struct cuda_array_info *array, __isl_keep isl_union_map *sched,
	struct cuda_array_ref_group **groups)
	isl_ctx *ctx = isl_union_map_get_ctx(sched);

	for (i = 0; i < array->n_ref; ++i) {
		isl_union_map *umap;
		struct cuda_array_ref_group *group;
		struct cuda_stmt_access *access = array->refs[i];

		map = isl_map_copy(access->access);
		umap = isl_union_map_from_map(map);
		umap = isl_union_map_apply_domain(umap,
			isl_union_map_copy(sched));

		if (isl_union_map_is_empty(umap)) {
			isl_union_map_free(umap);

		map = isl_map_from_union_map(umap);

		group = isl_calloc_type(ctx, struct cuda_array_ref_group);
		group->array = array;
		group->access = map;
		group->write = access->write;
		group->refs = &array->refs[i];

		groups[n++] = group;
static void free_array_ref_group(struct cuda_array_ref_group *group,
	free_bound_list(group->shared_bound, n_index);
	free_bound_list(group->private_bound, n_index);
	isl_map_free(group->access);
/* If two groups have overlapping access relations and if one of them
 * involves a write, then merge the two groups into one.
 *
 * We keep track of the grouping in "leader".  leader[j] points to
 * an earlier group array element that belongs to the same group,
 * or the array element j itself if this element is the first in the group.
 *
 * Return the number of group leaders.
 */
static int group_overlapping_writes(int n,
	struct cuda_array_ref_group **groups, int *leader)
	for (i = 0; i < n; ++i) {
		groups[l]->n_ref = 1;
		for (j = i - 1; j >= 0; --j) {
			if (!groups[l]->write && !groups[j]->write)

			map = isl_map_intersect(isl_map_copy(groups[l]->access),
				isl_map_copy(groups[j]->access));
			empty = isl_map_is_empty(map);

			groups[j]->access = isl_map_union(groups[j]->access,
			groups[j]->write = 1;
			groups[l]->access = NULL;
			groups[j]->n_ref += groups[l]->n_ref;
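/* A small illustration of the leader encoding: with three references where
 * reference 2 overlaps reference 0 and at least one of the two is a write,
 * the leader array could end up as
 *
 *	leader[0] = 0, leader[1] = 1, leader[2] = 0
 *
 * i.e., two group leaders (0 and 1), with reference 2 merged into group 0.
 */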
/* Compute the size of the shared array corresponding to the given
 * array reference group, based on the accesses from the current kernel,
 * as well as the offset of the shared piece in the original array.
 */
static void compute_group_shared_bound(struct cuda_gen *gen,
	struct cuda_array_info *array, struct cuda_array_ref_group *group)
	isl_ctx *ctx = isl_space_get_ctx(array->dim);

	if (!gen->options->use_shared_memory)
	if (cuda_array_is_read_only_scalar(array))

	group->shared_bound = create_bound_list(ctx, array->n_index);
	if (!can_tile_for_shared_memory(gen, array, group->access,
		group->shared_bound)) {
		free_bound_list(group->shared_bound, array->n_index);
		group->shared_bound = NULL;
/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile and if the merged group also has
 * a shared memory tile.
 *
 * Return the number of group leaders after merging.
 */
static int group_common_shared_memory_tile(struct cuda_gen *gen,
	struct cuda_array_info *array, int n,
	struct cuda_array_ref_group **groups, int *leader, int n_group)
	isl_ctx *ctx = isl_space_get_ctx(array->dim);

	for (i = 0; n_group > 1 && i < n; ++i) {
		if (!groups[i]->shared_bound)
		for (j = i - 1; j >= 0; --j) {
			struct cuda_array_bound *shared_bound;

			if (!groups[j]->shared_bound)

			map = isl_map_intersect(isl_map_copy(groups[l]->access),
				isl_map_copy(groups[j]->access));
			empty = isl_map_is_empty(map);

			map = isl_map_union(isl_map_copy(groups[l]->access),
				isl_map_copy(groups[j]->access));
			shared_bound = create_bound_list(ctx, array->n_index);
			if (!can_tile_for_shared_memory(gen, array, map,
				free_bound_list(shared_bound, array->n_index);

			free_bound_list(groups[j]->shared_bound,
			groups[j]->shared_bound = shared_bound;
			isl_map_free(groups[j]->access);
			groups[j]->access = map;
			groups[j]->n_ref += groups[l]->n_ref;
/* Extract an array of array reference groups from the array of references
 * and the grouping information in "leader".
 *
 * Store the results in array->n_group and array->groups.
 */
static void extract_array_groups(isl_ctx *ctx, struct cuda_array_info *array,
	int n, struct cuda_array_ref_group **groups, int *leader, int n_group)
	for (i = 2; i < n; ++i)
		leader[i] = leader[leader[i]];

	array->n_group = n_group;
	array->groups = isl_alloc_array(ctx, struct cuda_array_ref_group *,
	assert(array->groups);

	for (i = 0; i < n; ++i) {
		struct cuda_stmt_access **refs;

		if (leader[i] != i) {
			groups[i]->refs = NULL;
			free_array_ref_group(groups[i], array->n_index);

		refs = isl_alloc_array(ctx, struct cuda_stmt_access *,
		for (k = i; k < n; ++k)
			if (leader[k] == i) {
				refs[l++] = *groups[k]->refs;
				(*groups[k]->refs)->group = j;

		groups[i]->refs = refs;
		array->groups[j++] = groups[i];
/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 *
 * During the construction the group->refs field points to a single
 * array reference inside the array of array references, while
 * group->n_ref contains the number of elements in leader that
 * (directly or indirectly) point to this group, provided the group
 */
static void group_array_references(struct cuda_gen *gen,
	struct cuda_array_info *array, __isl_keep isl_union_map *sched)
	isl_ctx *ctx = isl_union_map_get_ctx(sched);
	struct cuda_array_ref_group **groups;

	groups = isl_calloc_array(ctx, struct cuda_array_ref_group *,
	n = populate_array_references(gen, array, sched, groups);

	leader = isl_alloc_array(ctx, int, n);

	n_group = group_overlapping_writes(n, groups, leader);

	for (i = 0; i < n; ++i)
		compute_group_shared_bound(gen, array, groups[i]);

	n_group = group_common_shared_memory_tile(gen, array, n, groups,
	extract_array_groups(ctx, array, n, groups, leader, n_group);
/* Take tiled_sched, project it onto the shared tile loops and
 * the loops that will be wrapped over the threads,
 * parametrize the shared tile loops and store the result in gen->shared_sched.
 * The position of the first of these parameters is stored in gen->first_shared.
 * Also compute a projection that projects out the loops that will be
 * wrapped over the threads and store this projection in gen->shared_proj.
 */
static void compute_shared_sched(struct cuda_gen *gen)
	isl_union_map *sched;

	sched = isl_union_map_copy(gen->tiled_sched);

	dim = isl_union_map_get_space(sched);
	gen->first_shared = isl_space_dim(dim, isl_dim_param);
	proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
	sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));

	dim = isl_union_map_get_space(sched);
	par = parametrization(dim, gen->shared_len + gen->n_block,
		0, gen->shared_len, "g");
	sched = isl_union_map_intersect_range(sched,
		isl_union_set_from_set(par));

	dim = isl_union_map_get_space(sched);
	proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);

	gen->shared_sched = sched;
	gen->shared_proj = isl_union_map_from_map(proj);
/* Group references of all arrays in the program.
 */
static void group_references(struct cuda_gen *gen)
	isl_union_map *sched;

	sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
		isl_union_map_copy(gen->shared_proj));

	for (i = 0; i < gen->n_array; ++i)
		group_array_references(gen, &gen->array[i], sched);

	isl_union_map_free(sched);
/* Free all array information that is local to the current kernel.
 */
static void free_local_array_info(struct cuda_gen *gen)
	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		for (j = 0; j < array->n_group; ++j)
			free_array_ref_group(array->groups[j], array->n_index);
		free(array->groups);

		if (array->n_group == 0)
		for (j = 0; j < gen->array[i].n_index; ++j) {
			isl_pw_aff_free(gen->array[i].local_bound[j]);
			gen->array[i].local_bound[j] = NULL;
static void print_iterator_list(FILE *out, int len, const char *prefix,
	for (i = 0; i < len; ++i) {
		fprintf(out, "(%s%d)", prefix, i);
		fprintf(out, "%s%d", prefix, i);
/* The sizes of the arrays on the host that have been computed by
 * extract_array_info may depend on the parameters.  Use the extra
 * constraints on the parameters that are valid at "host_domain"
 * to simplify these expressions.
 */
static void localize_bounds(struct cuda_gen *gen,
	__isl_keep isl_set *host_domain)
	context = isl_set_copy(host_domain);
	context = isl_set_params(context);

	for (i = 0; i < gen->n_array; ++i) {
		struct cuda_array_info *array = &gen->array[i];

		if (array->n_group == 0)

		for (j = 0; j < array->n_index; ++j) {
			pwaff = isl_pw_aff_copy(array->bound[j]);
			pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context));
			array->local_bound[j] = pwaff;

	isl_set_free(context);
/* Set gen->tile_len and gen->n_parallel to those of the first statement
 * in the statement list u.
 * Because of the way the schedule is constructed, the other statements
 * in the list, if any, should have the same values for these properties.
 */
static void set_tile_len(struct cuda_gen *gen, struct clast_user_stmt *u)
	struct cuda_stmt *stmt;

	nr = atoi(u->statement->name + 2);
	stmt = &gen->stmts[nr];

	gen->tile_len = stmt->tile_len;
	gen->n_parallel = stmt->n_parallel;
/* Extract a description of the grid, i.e., the possible values
 * of the block ids, from gen->tiled_sched.
 * The block ids are parameters in gen->tiled_sched.
 * We simply need to change them into set dimensions.
 */
static __isl_give isl_set *extract_grid(struct cuda_gen *gen)
	grid = isl_union_map_params(isl_union_map_copy(gen->tiled_sched));
	grid = isl_set_from_params(grid);
	grid = isl_set_add_dims(grid, isl_dim_set, gen->n_grid);
	for (i = 0; i < gen->n_grid; ++i) {
		snprintf(name, sizeof(name), "b%d", i);
		pos = isl_set_find_dim_by_name(grid, isl_dim_param, name);
		grid = isl_set_equate(grid, isl_dim_param, pos, isl_dim_set, i);
		grid = isl_set_project_out(grid, isl_dim_param, pos, 1);
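/* If the tiled schedule constrains the block id parameters to, say,
 * 0 <= b0 < 8 and 0 <= b1 < 16 (hypothetical values), the extracted grid
 * is the two-dimensional set
 *
 *	{ [b0, b1] : 0 <= b0 < 8 and 0 <= b1 < 16 }
 *
 * with the parameters turned into set dimensions.
 */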
/* Print the effective grid size as a list of the sizes in each
 * dimension, from innermost to outermost.
 *
 * The grid size specified by the user or set by default
 * in read_grid_sizes() and applied in tile_schedule()
 * may be too large for the given code in the sense that
 * it may contain blocks that don't need to execute anything.
 * We therefore don't print this grid size, but instead the
 * smallest grid size that ensures that all blocks that actually
 * execute code are included in the grid.
 *
 * For each block dimension, we compute the maximal value of the block id
 */
static void print_grid_size(struct cuda_gen *gen, __isl_take isl_set *context)
	if (gen->n_grid == 0)

	grid = extract_grid(gen);

	prn = isl_printer_to_file(gen->ctx, gen->cuda.host_c);
	prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);

	prn = isl_printer_print_str(prn, "(");
	for (i = gen->n_grid - 1; i >= 0; --i) {
		isl_pw_aff *bound = isl_set_dim_max(isl_set_copy(grid), i);

		bound = isl_pw_aff_coalesce(bound);
		bound = isl_pw_aff_gist(bound, isl_set_copy(context));

		space = isl_pw_aff_get_domain_space(bound);
		one = isl_aff_zero_on_domain(isl_local_space_from_space(space));
		one = isl_aff_add_constant_si(one, 1);
		bound = isl_pw_aff_add(bound, isl_pw_aff_from_aff(one));
		prn = isl_printer_print_pw_aff(prn, bound);
		isl_pw_aff_free(bound);

		prn = isl_printer_print_str(prn, ", ");
	prn = isl_printer_print_str(prn, ")");

	isl_printer_free(prn);

	isl_set_free(context);
/* This function is called for each leaf in the clast of the host code.
 * We first specialize the schedule to the site of the leaf, compute
 * the size of shared memory and then print the body of host code
 * and the associated kernel (through a call to print_kernel_body).
 */
static void print_host_user(struct gpucode_info *code,
	struct clast_user_stmt *u)
	struct cuda_gen *gen = code->user;
	isl_set *host_domain;
	isl_union_map *access;
	isl_union_map *local_sched;
	isl_union_set *arrays;

	set_tile_len(gen, u);

	host_domain = extract_entire_host_domain(u);

	local_sched = isl_union_map_intersect_range(
		isl_union_map_copy(gen->sched),
		isl_union_set_from_set(extend(isl_set_copy(host_domain),
			gen->untiled_len)));
	access = isl_union_map_union(isl_union_map_copy(gen->read),
		isl_union_map_copy(gen->write));
	access = isl_union_map_apply_domain(access,
		isl_union_map_copy(local_sched));
	arrays = isl_union_map_range(access);

	print_indent(code->dst, code->indent);
	fprintf(code->dst, "dim3 k%d_dimBlock", gen->kernel_id);
	print_reverse_list(code->dst, gen->n_block, gen->block_dim);
	fprintf(code->dst, ";\n");

	gen->tiled_sched = tile_schedule(gen, local_sched);
	gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
	gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);

	print_indent(code->dst, code->indent);
	fprintf(code->dst, "dim3 k%d_dimGrid", gen->kernel_id);
	print_grid_size(gen, isl_set_params(isl_set_copy(host_domain)));
	fprintf(code->dst, ";\n");

	gen->local_sched = isl_union_map_copy(gen->tiled_sched);

	dim = isl_union_map_get_space(gen->local_sched);
	par = parametrization(dim, gen->tiled_len, 0, gen->shared_len, "g");
	gen->local_sched = isl_union_map_intersect_range(gen->local_sched,
		isl_union_set_from_set(par));

	gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
	gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);

	gen->private_access = NULL;
	compute_shared_sched(gen);
	gen->privatization = compute_privatization(gen);
	group_references(gen);
	compute_private_size(gen);
	localize_bounds(gen, host_domain);

	gen->local_sched = interchange_for_unroll(gen, gen->local_sched);

	print_kernel_launch(gen, arrays);

	fprintf(gen->cuda.kernel_c, "{\n");

	print_kernel_body(gen, host_domain, gen->tiled_sched);

	fprintf(gen->cuda.kernel_c, "}\n");

	free_local_array_info(gen);
	isl_map_free(gen->privatization);
	isl_union_map_free(gen->private_access);
	isl_union_map_free(gen->local_sched);
	isl_union_map_free(gen->tiled_sched);
	isl_union_map_free(gen->shared_sched);
	isl_union_map_free(gen->shared_proj);
	isl_union_set_free(arrays);
	isl_set_free(host_domain);

	free(gen->tile_size);
/* Use CLooG to generate code for the outer gen->tile_first loops
 * of the global schedule in gen->sched.
 * The pretty printing of this code is handled by gpu_print_host_stmt,
 * which calls print_host_user for each kernel invocation location.
 */
static void print_cloog_host_code(struct cuda_gen *gen)
	isl_union_map *sched;
	CloogOptions *options;
	CloogDomain *cloog_context;
	CloogUnionDomain *ud;
	struct clast_stmt *stmt;

	options = cloog_options_malloc(gen->state);
	options->language = CLOOG_LANGUAGE_C;
	options->strides = 1;
	options->stop = gen->tile_first;
	options->f = gen->untiled_len;
	options->l = gen->untiled_len;
	options->save_domains = 1;
	options->noscalars = 1;

	sched = isl_union_map_copy(gen->sched);
	ud = cloog_union_domain_from_isl_union_map(sched);
	for (i = 0; i < options->stop; ++i) {
		snprintf(name, sizeof(name), "h%d", i);
		ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
	context = isl_set_copy(gen->context);
	cloog_context = cloog_domain_from_isl_set(context);
	input = cloog_input_alloc(cloog_context, ud);

	stmt = cloog_clast_create_from_input(input, options);

	gen->code.indent = 0;
	gen->code.dst = gen->cuda.host_c;
	gen->code.print_user_stmt = NULL;
	gen->code.print_user_stmt_list = &print_host_user;
	gen->code.print_for_head = NULL;
	gen->code.print_for_foot = NULL;
	gen->code.user = gen;
	gpu_print_host_stmt(&gen->code, stmt);

	cloog_clast_free(stmt);
	cloog_options_free(options);
	fprintf(gen->cuda.host_c, "\n");
void print_cuda_macros(struct cuda_gen *gen)
	const char *macros =
		"#define cudaCheckReturn(ret) assert((ret) == cudaSuccess)\n"
		"#define cudaCheckKernel()"
		" assert(cudaGetLastError() == cudaSuccess)\n\n";
	fputs(macros, gen->cuda.host_c);
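/* The emitted host code then wraps the CUDA runtime calls in these macros,
 * along the lines of (illustrative only; the actual calls and names are
 * produced elsewhere in this file):
 *
 *	cudaCheckReturn(cudaMalloc((void **) &dev_A, size));
 *	kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_A, n);
 *	cudaCheckKernel();
 */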
void print_host_code(struct cuda_gen *gen)
	fprintf(gen->cuda.host_c, "{\n");
	print_cloog_macros(gen->cuda.host_c);
	print_cloog_macros(gen->cuda.kernel_c);

	print_cuda_macros(gen);

	declare_device_arrays(gen);
	allocate_device_arrays(gen);
	copy_arrays_to_device(gen);

	print_cloog_host_code(gen);

	copy_arrays_from_device(gen);
	free_device_arrays(gen);

	fprintf(gen->cuda.host_c, "}\n");
__isl_give isl_set *add_context_from_str(__isl_take isl_set *set,
	ctx = isl_set_get_ctx(set);
	context = isl_set_read_from_str(ctx, str);
	context = isl_set_align_params(context, isl_set_get_space(set));
	set = isl_set_intersect(set, context);
__isl_give isl_union_map *extract_sizes_from_str(isl_ctx *ctx, const char *str)
	return isl_union_map_read_from_str(ctx, str);
/* Return the union of all iteration domains of the gen->stmts[i].
 */
static __isl_give isl_union_set *extract_domain(struct cuda_gen *gen)
	isl_union_set *domain;

	domain = isl_union_set_empty(isl_set_get_space(gen->context));
	for (i = 0; i < gen->n_stmts; ++i) {
		domain_i = isl_set_copy(gen->stmts[i].domain);
		domain = isl_union_set_union(domain,
			isl_union_set_from_set(domain_i));
/* Information about the outermost tilable bands in the forest of bands.
 *
 * tile_len and n_parallel are only set on band_info structures
 * that correspond to outermost bands.  For other bands (in particular,
 * ancestors of the outermost bands), n_parallel is set to 0.
 *
 * prefix is the (padded) schedule leading up to the outermost tilable bands.
 *
 * tile_first is the number of schedule dimensions in prefix.
 *
 * suffix is the schedule of the outermost tilable bands and their descendants.
 */
struct band_info {
	struct cuda_gen *gen;
	isl_union_map *prefix;
	isl_union_map *suffix;
/* Set tile_len and n_parallel of the statement to that of
 * their outermost band, recorded in the band_info.
 */
static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
	struct band_info *info = user;
	struct cuda_stmt *stmt;

	nr = atoi(isl_map_get_tuple_name(map, isl_dim_in) + 2);
	stmt = &info->gen->stmts[nr];

	stmt->tile_len = info->tile_len;
	stmt->n_parallel = info->n_parallel;
static void list_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band_list *list, int pos, struct band_info *list_info);

/* Check if this band has any parallel loops.  If so, take it as
 * the outermost tilable band.  If not, continue looking for the
 * outermost tilable band in the children of the current band.
 */
static void band_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band *band, int pos, struct band_info *info)
	int n = isl_band_n_member(band);

	for (n_parallel = 0; n_parallel < n; ++n_parallel)
		if (!isl_band_member_is_zero_distance(band, n_parallel))

	info->n_parallel = n_parallel;
	info->tile_first = pos;
	info->prefix = isl_band_get_prefix_schedule(band);
	info->suffix = isl_union_map_flat_range_product(
		isl_band_get_partial_schedule(band),
		isl_band_get_suffix_schedule(band));
	isl_union_map_foreach_map(info->prefix,
		&set_stmt_tile_len, info);
	} else if (isl_band_has_children(band)) {
		isl_band_list *children;
		children = isl_band_get_children(band);
		list_select_outer_band(gen, children, pos + n, info);
		info->tile_first = pos + n;
		info->prefix = isl_union_map_flat_range_product(
			isl_band_get_prefix_schedule(band),
			isl_band_get_partial_schedule(band));
		info->suffix = isl_band_get_suffix_schedule(band);
		isl_union_map_foreach_map(info->prefix,
			&set_stmt_tile_len, info);

	isl_band_free(band);
/* Comparison function that returns a non-zero value for band_infos
 * with different tile_len fields or different n_parallel fields.
 */
static int cmp_band(const void *p1, const void *p2)
	const struct band_info *info1 = p1;
	const struct band_info *info2 = p2;

	if (info1->tile_len != info2->tile_len)
		return info1->tile_len - info2->tile_len;

	return info1->n_parallel - info2->n_parallel;
/* Extend "umap" with coordinates with fixed value "val"
 * to a total length of "dst_len", assuming the original dimension is "src_len".
 */
static __isl_give isl_union_map *extend_range(__isl_take isl_union_map *umap,
	int src_len, int dst_len, int val)
	dim = isl_union_map_get_space(umap);
	map = isl_map_reverse(projection(dim, dst_len, src_len));
	for (i = src_len; i < dst_len; ++i)
		map = isl_map_fix_si(map, isl_dim_out, i, val);

	umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));
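/* For example, extending a range from src_len == 2 to dst_len == 4 with
 * val == 0 composes each map in "umap" with
 *
 *	{ [s0, s1] -> [s0, s1, 0, 0] }
 *
 * so that the prefix schedules of different bands end up with the same
 * number of dimensions.
 */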

/* Group bands with the same values for tile_len and n_parallel.
 * The prefix schedule is then extended with a fixed coordinate that
 * is different for each such group.
 * Note that the actual values for this coordinate are not important.
 * The bands have already been effectively separated at a higher level
 * or they are independent and may be executed in parallel.
 * The list of band_info has been sorted before this function is called.
 */
static void separate_bands(struct band_info *info, int n)
{
	int i;
	int j = 0;
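
	/* "j" counts the groups encountered so far; it is used as the
	 * value of the extra coordinate appended to the prefix schedule.
	 */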
	for (i = 0; i < n; ++i) {
		int l = info[i].tile_first;

		if (i &&
		    (info[i].tile_len != info[i - 1].tile_len ||
		     info[i].n_parallel != info[i - 1].n_parallel))
			j++;

		info[i].prefix = extend_range(info[i].prefix,
						l, l + 1, j);
		info[i].tile_first = l + 1;
	}
}

/* Select the outermost bands in the elements of the list, align
 * their prefix schedules, separate bands with different values
 * for tile_len and/or n_parallel and then combine the resulting
 * prefix and suffix schedules into a single pair of prefix and
 * suffix schedules for the entire list.
 */
static void list_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band_list *list, int pos, struct band_info *list_info)
{
	isl_band *band;
	int i;
	int n = isl_band_list_n_band(list);
	isl_ctx *ctx = isl_band_list_get_ctx(list);
	struct band_info *info;
	int max_tile_first;
	isl_union_map *prefix;
	isl_union_map *suffix;

	info = isl_calloc_array(ctx, struct band_info, n);

	max_tile_first = 0;
	for (i = 0; i < n; ++i) {
		band = isl_band_list_get_band(list, i);
		band_select_outer_band(gen, band, pos, &info[i]);
		if (info[i].tile_first > max_tile_first)
			max_tile_first = info[i].tile_first;
	}
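
	/* Pad the prefix schedules of the bands that start at an outer
	 * position with zero coordinates, so that all prefix schedules
	 * have length max_tile_first.
	 */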
	for (i = 0; i < n; ++i) {
		if (info[i].tile_first == max_tile_first)
			continue;
		info[i].prefix = extend_range(info[i].prefix,
					info[i].tile_first, max_tile_first, 0);
		info[i].tile_first = max_tile_first;
	}
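
	/* Sort the bands by tile_len and n_parallel.  If the bands do not
	 * all agree on these values, add an extra coordinate to keep bands
	 * with different values apart (see separate_bands).
	 */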
	qsort(info, n, sizeof(struct band_info), &cmp_band);

	for (i = 0; i < n - 1; ++i)
		if (info[i].tile_len != info[i + 1].tile_len ||
		    info[i].n_parallel != info[i + 1].n_parallel)
			break;

	if (i < n - 1)
		separate_bands(info, n);

	prefix = info[0].prefix;
	suffix = info[0].suffix;

	for (i = 1; i < n; ++i) {
		prefix = isl_union_map_union(prefix, info[i].prefix);
		suffix = isl_union_map_union(suffix, info[i].suffix);
	}

	list_info->tile_first = info[0].tile_first;
	list_info->tile_len = -1;
	list_info->prefix = prefix;
	list_info->suffix = suffix;

	isl_band_list_free(list);
	free(info);
}

/* Set max_out to the maximal number of output dimensions over
 * all maps.
 */
static int update_max_out(__isl_take isl_map *map, void *user)
{
	int *max_out = user;
	int n_out = isl_map_dim(map, isl_dim_out);

	if (n_out > *max_out)
		*max_out = n_out;

	isl_map_free(map);
	return 0;
}

struct align_range_data {
	int max_out;
	isl_union_map *res;
};

/* Extend the dimension of the range of the given map to data->max_out
 * and then add the result to data->res.
 */
static int map_align_range(__isl_take isl_map *map, void *user)
{
	struct align_range_data *data = user;
	int i;
	isl_space *dim;
	isl_map *proj;
	int n_out = isl_map_dim(map, isl_dim_out);

	dim = isl_union_map_get_space(data->res);
	proj = isl_map_reverse(projection(dim, data->max_out, n_out));
	for (i = n_out; i < data->max_out; ++i)
		proj = isl_map_fix_si(proj, isl_dim_out, i, 0);

	map = isl_map_apply_range(map, proj);

	data->res = isl_union_map_add_map(data->res, map);

	return 0;
}

/* Extend the ranges of the maps in the union map such that they all
 * have the same dimension.
 */
static __isl_give isl_union_map *align_range(__isl_take isl_union_map *umap)
{
	struct align_range_data data;
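
	/* First determine the maximal range dimension over all maps and
	 * then pad each map to that dimension, collecting the padded maps
	 * in data.res.
	 */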
	data.max_out = 0;
	isl_union_map_foreach_map(umap, &update_max_out, &data.max_out);

	data.res = isl_union_map_empty(isl_union_map_get_space(umap));
	isl_union_map_foreach_map(umap, &map_align_range, &data);

	isl_union_map_free(umap);
	return data.res;
}

/* Select the outermost tilable band that (by construction)
 * has at least one parallel loop.
 * The starting position of the aligned band is stored in
 * gen->tile_first.
 * The sizes and number of parallel loops may be different in different
 * parts of the band forest and are therefore stored in the cuda_stmts.
 *
 * Return the complete schedule, with the tilable bands aligned
 * at gen->tile_first and padded with zero, if needed.
 */
static __isl_give isl_union_map *select_outer_tilable_band(struct cuda_gen *gen,
	__isl_keep isl_schedule *schedule)
{
	isl_band_list *list;
	struct band_info info;

	gen->n_parallel = 0;

	list = isl_schedule_get_band_forest(schedule);

	list_select_outer_band(gen, list, 0, &info);

	gen->tile_first = info.tile_first;
	info.suffix = align_range(info.suffix);

	return isl_union_map_flat_range_product(info.prefix, info.suffix);
}

/* Set gen->untiled_len to the number of scheduling dimensions
 * for the schedule of the first domain.
 * We assume here that this number is the same for all domains.
 */
static int set_untiled_len(__isl_take isl_map *map, void *user)
{
	unsigned *untiled_len = user;

	*untiled_len = isl_map_dim(map, isl_dim_out);

	isl_map_free(map);
	return -1;
}

/* Compute an appropriate schedule based on the accesses in
 * gen->read and gen->write.
 *
 * We first compute dependences and then use those to compute
 * a schedule that has a parallel loop in each tilable band.
 * Finally, we select the outermost tilable band.
 */
static void compute_schedule(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_ctx *ctx = isl_union_map_get_ctx(sched);
	isl_union_set *domain;
	isl_union_map *empty;
	isl_union_map *dep_raw, *dep2, *dep3, *dep;
	isl_union_map *uninitialized;
	isl_schedule *schedule;

	empty = isl_union_map_empty(isl_union_map_get_space(sched));
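
	/* The first flow computation uses the reads as sinks and the writes
	 * as must-sources: dep_raw holds the read-after-write dependences
	 * and "uninitialized" the reads without a preceding write.
	 * The second uses the writes as sinks and the writes (must) and
	 * reads (may) as sources, yielding the write-after-write and
	 * write-after-read dependences.
	 */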
	isl_union_map_compute_flow(isl_union_map_copy(gen->read),
				    isl_union_map_copy(gen->write), empty,
				    isl_union_map_copy(sched),
				    &dep_raw, NULL, &uninitialized, NULL);
	isl_union_map_compute_flow(isl_union_map_copy(gen->write),
				    isl_union_map_copy(gen->write),
				    isl_union_map_copy(gen->read),
				    isl_union_map_copy(sched),
				    &dep2, &dep3, NULL, NULL);
	isl_union_map_free(sched);
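
	/* Array elements that may be read before they are written inside
	 * the scop need to be copied in from the host.
	 */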
	gen->copy_in = isl_union_map_range(uninitialized);

	dep = isl_union_map_union(dep2, dep3);
	dep = isl_union_map_union(dep, dep_raw);
	dep = isl_union_map_coalesce(dep);

	domain = extract_domain(gen);
	schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
				isl_union_map_copy(dep), dep);

	sched = select_outer_tilable_band(gen, schedule);

	isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
	sched = isl_union_map_intersect_domain(sched, domain);
	gen->sched = sched;

	isl_schedule_free(schedule);
}
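
/* Extract a single pet access expression into a newly allocated
 * cuda_stmt_access, append it to the linked list pointed to by
 * *next_access and return a pointer to the new end of the list.
 */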
static struct cuda_stmt_access **expr_extract_access(struct pet_expr *expr,
	struct cuda_stmt_access **next_access)
{
	struct cuda_stmt_access *access;
	isl_ctx *ctx = isl_map_get_ctx(expr->acc.access);

	access = isl_alloc_type(ctx, struct cuda_stmt_access);
	assert(access);
	access->next = NULL;
	access->read = expr->acc.read;
	access->write = expr->acc.write;
	access->access = isl_map_copy(expr->acc.access);

	*next_access = access;
	next_access = &(*next_access)->next;

	return next_access;
}
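
/* Construct a linked list of cuda_stmt_access objects for all the access
 * expressions in "expr", recursively visiting the arguments first.
 * Return a pointer to the end of the list.
 */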
static struct cuda_stmt_access **expr_extract_accesses(struct pet_expr *expr,
	struct cuda_stmt_access **next_access)
{
	int i;

	for (i = 0; i < expr->n_arg; ++i)
		next_access = expr_extract_accesses(expr->args[i],
							next_access);

	if (expr->type == pet_expr_access)
		next_access = expr_extract_access(expr, next_access);

	return next_access;
}

static void pet_stmt_extract_accesses(struct cuda_stmt *stmt)
{
	struct cuda_stmt_access **next_access = &stmt->accesses;

	stmt->accesses = NULL;
	expr_extract_accesses(stmt->body, next_access);
}

/* Return an array of cuda_stmt representing the statements in "scop".
 */
static struct cuda_stmt *extract_stmts(isl_ctx *ctx, struct pet_scop *scop,
	__isl_keep isl_set *context)
{
	int i;
	struct cuda_stmt *stmts;

	stmts = isl_calloc_array(ctx, struct cuda_stmt, scop->n_stmt);
	assert(stmts);

	for (i = 0; i < scop->n_stmt; ++i) {
		struct cuda_stmt *s = &stmts[i];
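
		/* Restrict the statement's iteration domain to the parameter
		 * context, i.e., the scop context possibly tightened by the
		 * user (see options->ctx in cuda_pet).
		 */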
		s->domain = isl_set_copy(scop->stmts[i]->domain);
		s->domain = isl_set_intersect_params(s->domain,
							isl_set_copy(context));
		s->body = scop->stmts[i]->body;
		pet_stmt_extract_accesses(s);
	}

	return stmts;
}

/* Replace the scop in the "input" file by equivalent code
 * that uses the GPU.  "scop" is assumed to correspond to this scop.
 *
 * We first compute a schedule that respects the dependences
 * of the original program and select the outermost band
 * of tilable dimensions that has at least one parallel loop.
 * We then have three blocks of dimensions
 *
 *	H		B			G
 *
 * The tilable band "B" is first tiled according to "tile" sizes, resulting
 * in
 *
 *	H	T		P		G
 *
 * For each iteration of the T loop and for each array, we compute
 * the array elements accessed by that iteration, construct a rectangular
 * box around it and shift it to the origin.  The result is used
 * as shared memory for the array.
 *
 * We then split off at most 2 parallel loops from the T loops and
 * at most 3 parallel loops from the P loops
 *
 *	H	T1	T2	P1	P2	G
 *
 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
 * according to "grid"/"block" sizes.
 *
 *	H	T1T T1P	T2	P1T P1P	P2	G
 *
 * Finally, the T1P and P1P iterators are equated to the block and
 * thread dimensions respectively and so are effectively removed.
 * The H loops are run on the host.  The T1T, T2, P1T, P2 and G loops
 * are run on the GPU.
 *
 * Code is generated in three stages.  We first generate code for the
 * host (the H loops), with iterators h%d.  Then, for each leaf node
 * of the resulting AST, we generate code for the shared loops (up to
 * and including T2), with iterators g%d and after equating the H loops
 * to h%d parameters and the T1P loops to the block dimensions.
 * Finally, we generate code for the remaining loops in a similar fashion.
 */
int cuda_pet(isl_ctx *ctx, struct pet_scop *scop, struct ppcg_options *options,
	const char *input)
{
	isl_union_map *sched;
	struct cuda_gen gen;

	scop = pet_scop_align_params(scop);

	gen.ctx = ctx;
	gen.context = isl_set_copy(scop->context);
	gen.context = add_context_from_str(gen.context, options->ctx);
	gen.sizes = extract_sizes_from_str(ctx, options->sizes);
	gen.n_stmts = scop->n_stmt;
	gen.stmts = extract_stmts(ctx, scop, gen.context);
	gen.read = pet_scop_collect_reads(scop);
	gen.write = pet_scop_collect_writes(scop);
	gen.options = options;
	gen.state = cloog_isl_state_malloc(gen.ctx);

	cuda_open_files(&gen.cuda, input);

	collect_array_info(&gen);

	sched = pet_scop_collect_schedule(scop);

	compute_schedule(&gen, sched);

	print_host_code(&gen);

	cloog_state_free(gen.state);
	clear_cuda_gen(&gen);

	cuda_close_files(&gen.cuda);

	return 0;
}