/*
 * Copyright 2010-2011 INRIA Saclay
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 */
14 #include <isl/polynomial.h>
15 #include <isl/union_set.h>
19 #include <isl/schedule.h>
20 #include <isl/options.h>
21 #include <cloog/isl/cloog.h>
24 #include "cuda_common.h"
27 #include "scoplib_isl.h"
28 #include "ppcg_options.h"
30 /* The fields stride, shift and shift_map only contain valid information
32 * If so, they express that current index is such that if you add shift,
33 * then the result is always a multiple of stride.
34 * shift_map contains the mapping
36 * i -> (i + shift)/stride
38 struct cuda_array_bound
{
43 isl_qpolynomial
*shift
;
44 isl_basic_map
*shift_map
;
47 struct cuda_array_info
;
49 /* A group of array references in a kernel that should be handled together.
50 * If private_bound is not NULL, then it is mapped to registers.
51 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
52 * Otherwise, it is accesses from global memory.
54 struct cuda_array_ref_group
{
55 /* The references in this group access this array. */
56 struct cuda_array_info
*array
;
57 /* Position of this group in the list of reference groups of array. */
60 /* The following fields are use during the construction of the groups.
61 * access is the combined access relation relative to the shared
63 * write is set if any access in the group is a write.
68 /* For each index, size and offset of piece in shared memory. */
69 struct cuda_array_bound
*shared_bound
;
71 /* For each index, size and offset of piece in private memory. */
72 struct cuda_array_bound
*private_bound
;
74 /* References in this group; point to elements of a linked list. */
76 struct cuda_stmt_access
**refs
;
79 struct cuda_array_info
{
81 /* Name of the array. */
83 /* Number of indices. */
85 /* For each index, a bound on the array in that direction. */
86 isl_pw_qpolynomial_fold
**bound
;
87 /* For each index, bound[i] specialized to the current kernel. */
88 isl_pw_qpolynomial_fold
**local_bound
;
90 /* All references to this array; point to elements of a linked list. */
92 struct cuda_stmt_access
**refs
;
94 /* The reference groups associated to this array. */
96 struct cuda_array_ref_group
**groups
;
98 /* Last shared memory tile dimension that affects tile of this array. */
100 /* Dimension at which copying to/from shared memory is printed.
101 * if >= 0, then the value is >= last_shared
102 * if -1, then the copying is done at the leaf level.
104 int print_shared_level
;
/* NOTE(review): extraction-garbled chunk -- logical source lines are split
 * across physical lines and interior lines are missing from this view
 * (notably the declaration of "global" and the else branch that presumably
 * sets it when neither bound is present -- TODO confirm against the
 * original file).  Code kept byte-identical; comments only added.
 */
107 /* Print the name of the local copy of a given group of array references.
109 static void print_array_name(FILE *out
, struct cuda_array_ref_group
*group
)
/* Registers: prefix the array name with "private_". */
113 if (group
->private_bound
)
114 fprintf(out
, "private_");
/* Shared memory: prefix the array name with "shared_". */
115 else if (group
->shared_bound
)
116 fprintf(out
, "shared_");
119 fprintf(out
, "%s", group
->array
->name
);
/* Append the group number to disambiguate when the array has more than
 * one reference group and the copy is not the global one.
 */
120 if (!global
&& group
->array
->n_group
> 1)
121 fprintf(out
, "_%d", group
->nr
);
124 /* Collect all references to the given array and store pointers to them
127 static void collect_references(struct cuda_gen
*gen
,
128 struct cuda_array_info
*array
)
134 for (i
= 0; i
< gen
->n_stmts
; ++i
) {
135 struct cuda_stmt
*stmt
= &gen
->stmts
[i
];
136 struct cuda_stmt_access
*access
;
138 for (access
= stmt
->accesses
; access
; access
= access
->next
) {
140 name
= isl_map_get_tuple_name(access
->access
,
142 if (name
&& !strcmp(array
->name
, name
))
148 array
->refs
= isl_alloc_array(gen
->ctx
, struct cuda_stmt_access
*, n
);
152 for (i
= 0; i
< gen
->n_stmts
; ++i
) {
153 struct cuda_stmt
*stmt
= &gen
->stmts
[i
];
154 struct cuda_stmt_access
*access
;
156 for (access
= stmt
->accesses
; access
; access
= access
->next
) {
158 name
= isl_map_get_tuple_name(access
->access
,
160 if (!name
|| strcmp(array
->name
, name
))
163 array
->refs
[n
++] = access
;
/* Allocate an array of "n_index" cuda_array_bound elements (one per array
 * index) and initialize each element: "size" and "stride" are initialized
 * as isl_int values, while "shift" and "shift_map" start out NULL,
 * i.e., no stride/shift information is known yet (see the comment on
 * struct cuda_array_bound near the top of this file).
 * NOTE(review): extraction-garbled chunk with interior lines missing
 * (declaration of "i", any allocation check, the final return of "bound"
 * -- TODO confirm against the original file).  Code kept byte-identical.
 */
168 static struct cuda_array_bound
*create_bound_list(isl_ctx
*ctx
, int n_index
)
171 struct cuda_array_bound
*bound
;
173 bound
= isl_alloc_array(ctx
, struct cuda_array_bound
, n_index
);
/* Per-index initialization. */
176 for (i
= 0; i
< n_index
; ++i
) {
177 isl_int_init(bound
[i
].size
);
179 isl_int_init(bound
[i
].stride
);
180 bound
[i
].shift
= NULL
;
181 bound
[i
].shift_map
= NULL
;
/* Release a list of "n_index" cuda_array_bound elements: the counterpart
 * of create_bound_list.  For every element, clear the isl_int fields
 * "size" and "stride" and free the "lb" and "shift" quasi-polynomials
 * and the "shift_map" basic map (the isl *_free calls accept NULL).
 * NOTE(review): extraction-garbled chunk with interior lines missing
 * (likely a NULL guard on "bound", the final free(bound) and the closing
 * brace -- TODO confirm against the original file).  Code kept
 * byte-identical; comments only added.
 */
187 static void free_bound_list(struct cuda_array_bound
*bound
, int n_index
)
194 for (j
= 0; j
< n_index
; ++j
) {
195 isl_int_clear(bound
[j
].size
);
196 isl_int_clear(bound
[j
].stride
);
197 isl_qpolynomial_free(bound
[j
].lb
);
198 isl_qpolynomial_free(bound
[j
].shift
);
199 isl_basic_map_free(bound
[j
].shift_map
);
204 /* Compute bounds on the host arrays based on the accessed elements
205 * and collect all references to the array.
207 static int extract_array_info(__isl_take isl_set
*array
, void *user
)
210 struct cuda_gen
*gen
= (struct cuda_gen
*)user
;
213 isl_pw_qpolynomial_fold
**bounds
;
214 isl_pw_qpolynomial_fold
**local_bounds
;
216 n_index
= isl_set_dim(array
, isl_dim_set
);
217 name
= isl_set_get_tuple_name(array
);
218 bounds
= isl_alloc_array(isl_set_get_ctx(array
),
219 isl_pw_qpolynomial_fold
*, n_index
);
221 local_bounds
= isl_calloc_array(isl_set_get_ctx(array
),
222 isl_pw_qpolynomial_fold
*, n_index
);
223 assert(local_bounds
);
224 gen
->array
[gen
->n_array
].dim
= isl_set_get_dim(array
);
225 gen
->array
[gen
->n_array
].name
= strdup(name
);
226 gen
->array
[gen
->n_array
].n_index
= n_index
;
227 gen
->array
[gen
->n_array
].bound
= bounds
;
228 gen
->array
[gen
->n_array
].local_bound
= local_bounds
;
230 for (i
= 0; i
< n_index
; ++i
) {
232 isl_qpolynomial
*qp
, *one
;
233 isl_pw_qpolynomial
*pwqp
;
234 isl_pw_qpolynomial_fold
*pwf
;
236 dim
= isl_set_get_dim(array
);
237 one
= isl_qpolynomial_one(isl_dim_copy(dim
));
238 qp
= isl_qpolynomial_var(dim
, isl_dim_set
, i
);
239 qp
= isl_qpolynomial_add(qp
, one
);
240 pwqp
= isl_pw_qpolynomial_alloc(isl_set_copy(array
), qp
);
241 pwf
= isl_pw_qpolynomial_bound(pwqp
, isl_fold_max
, NULL
);
242 pwf
= isl_pw_qpolynomial_fold_gist(pwf
,
243 isl_set_copy(gen
->context
));
248 collect_references(gen
, &gen
->array
[gen
->n_array
]);
256 void collect_array_info(struct cuda_gen
*gen
)
258 isl_union_set
*arrays
;
260 arrays
= isl_union_map_range(isl_union_map_copy(gen
->read
));
261 arrays
= isl_union_set_union(arrays
,
262 isl_union_map_range(isl_union_map_copy(gen
->write
)));
263 arrays
= isl_union_set_coalesce(arrays
);
265 gen
->n_array
= isl_union_set_n_set(arrays
);
266 gen
->array
= isl_alloc_array(gen
->ctx
,
267 struct cuda_array_info
, gen
->n_array
);
270 isl_union_set_foreach_set(arrays
, &extract_array_info
, gen
);
271 isl_union_set_free(arrays
);
274 static void free_array_info(struct cuda_gen
*gen
)
278 for (i
= 0; i
< gen
->n_array
; ++i
) {
279 int n_index
= gen
->array
[i
].n_index
;
280 free(gen
->array
[i
].name
);
281 for (j
= 0; j
< n_index
; ++j
) {
282 isl_pw_qpolynomial_fold_free(gen
->array
[i
].bound
[j
]);
283 isl_pw_qpolynomial_fold_free(gen
->array
[i
].local_bound
[j
]);
285 isl_dim_free(gen
->array
[i
].dim
);
286 free(gen
->array
[i
].bound
);
287 free(gen
->array
[i
].local_bound
);
288 free(gen
->array
[i
].refs
);
/* Emit, into the generated host source file (gen->cuda.host_c), one
 * device-pointer declaration per accessed array, of the form
 *   <type> *dev_<name>;
 * where <type> comes from the user options and <name> is the array name.
 * NOTE(review): extraction-garbled chunk; the declaration of "i" and the
 * enclosing braces are not visible in this view.  Code kept
 * byte-identical; comments only added.
 */
293 static void declare_device_arrays(struct cuda_gen
*gen
)
297 for (i
= 0; i
< gen
->n_array
; ++i
)
298 fprintf(gen
->cuda
.host_c
, "%s *dev_%s;\n",
299 gen
->options
->type
, gen
->array
[i
].name
);
302 static void print_array_size(struct cuda_gen
*gen
, FILE *out
,
303 struct cuda_array_info
*array
)
308 prn
= isl_printer_to_file(gen
->ctx
, out
);
309 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
310 for (i
= 0; i
< array
->n_index
; ++i
) {
311 prn
= isl_printer_print_str(prn
, "(");
312 prn
= isl_printer_print_pw_qpolynomial_fold(prn
,
314 prn
= isl_printer_print_str(prn
, ") * ");
316 prn
= isl_printer_print_str(prn
, "sizeof(");
317 prn
= isl_printer_print_str(prn
, gen
->options
->type
);
318 prn
= isl_printer_print_str(prn
, ")");
319 isl_printer_free(prn
);
322 static void allocate_device_arrays(struct cuda_gen
*gen
)
326 for (i
= 0; i
< gen
->n_array
; ++i
) {
327 fprintf(gen
->cuda
.host_c
, "cudaMalloc(&dev_%s, ",
329 print_array_size(gen
, gen
->cuda
.host_c
, &gen
->array
[i
]);
330 fprintf(gen
->cuda
.host_c
, ");\n");
/* Emit, into the generated host source file (gen->cuda.host_c), one
 *   cudaFree(dev_<name>);
 * statement per accessed array -- the counterpart of
 * allocate_device_arrays above.
 * NOTE(review): extraction-garbled chunk; the fprintf argument supplying
 * the array name (presumably gen->array[i].name -- TODO confirm) and the
 * closing braces are missing from this view.  Code kept byte-identical.
 */
334 static void free_device_arrays(struct cuda_gen
*gen
)
338 for (i
= 0; i
< gen
->n_array
; ++i
)
339 fprintf(gen
->cuda
.host_c
, "cudaFree(dev_%s);\n",
343 static void copy_arrays_to_device(struct cuda_gen
*gen
)
347 for (i
= 0; i
< gen
->n_array
; ++i
) {
352 dim
= isl_dim_copy(gen
->array
[i
].dim
);
353 read_i
= isl_union_set_extract_set(gen
->copy_in
, dim
);
354 empty
= isl_set_fast_is_empty(read_i
);
355 isl_set_free(read_i
);
359 fprintf(gen
->cuda
.host_c
, "assert(sizeof(%s) == ",
361 print_array_size(gen
, gen
->cuda
.host_c
, &gen
->array
[i
]);
362 fprintf(gen
->cuda
.host_c
, ");\n");
363 fprintf(gen
->cuda
.host_c
, "cudaMemcpy(dev_%s, %s, ",
364 gen
->array
[i
].name
, gen
->array
[i
].name
);
365 print_array_size(gen
, gen
->cuda
.host_c
, &gen
->array
[i
]);
366 fprintf(gen
->cuda
.host_c
, ", cudaMemcpyHostToDevice);\n");
370 static void copy_arrays_from_device(struct cuda_gen
*gen
)
373 isl_union_set
*write
;
374 write
= isl_union_map_range(isl_union_map_copy(gen
->write
));
376 for (i
= 0; i
< gen
->n_array
; ++i
) {
381 dim
= isl_dim_copy(gen
->array
[i
].dim
);
382 write_i
= isl_union_set_extract_set(write
, dim
);
383 empty
= isl_set_fast_is_empty(write_i
);
384 isl_set_free(write_i
);
388 fprintf(gen
->cuda
.host_c
, "cudaMemcpy(%s, dev_%s, ",
389 gen
->array
[i
].name
, gen
->array
[i
].name
);
390 print_array_size(gen
, gen
->cuda
.host_c
, &gen
->array
[i
]);
391 fprintf(gen
->cuda
.host_c
, ", cudaMemcpyDeviceToHost);\n");
394 isl_union_set_free(write
);
/* Read up to "len" integers from the file called "filename" into "sizes",
 * overriding the defaults filled in by the caller (see read_sizes below).
 * NOTE(review): extraction-garbled chunk; the tail of the signature (the
 * "sizes"/"len" parameters), any check that fopen succeeded, the behavior
 * on a short read (presumably stop early -- TODO confirm) and the fclose
 * are missing from this view.  Code kept byte-identical; comments only.
 */
397 static void read_sizes_from_file(struct cuda_gen
*gen
, const char *filename
,
403 file
= fopen(filename
, "r");
/* Stop as soon as fscanf fails to parse another integer. */
407 for (i
= 0; i
< len
; ++i
)
408 if (fscanf(file
, "%d", &sizes
[i
]) < 1)
/* Reverse the "len" elements of "list" in place.
 *
 * Swaps list[i] with list[len - 1 - i] over the first half of the array;
 * an odd-length list leaves its middle element untouched, and len <= 1
 * (including len == 0) is a no-op.
 *
 * Fix: the extracted source was missing the line that saves list[i] into
 * the temporary before it is overwritten (the fused numbering jumps from
 * 419 to 421); without it the swap would lose one side of each pair.
 * It is restored here.
 */
static void reverse_list(int *list, int len)
{
	int i;
	int t;

	for (i = 0; 2 * i < len; ++i) {
		t = list[i];
		list[i] = list[len - 1 - i];
		list[len - 1 - i] = t;
	}
}
426 /* Read user specified sizes from "tile.sizes", "block.sizes" and "grid.sizes"
427 * after filling in some potentially useful defaults.
429 static void read_sizes(struct cuda_gen
*gen
)
433 gen
->tile_size
= isl_alloc_array(gen
->ctx
, int, gen
->tile_len
);
434 assert(gen
->tile_size
);
435 for (n
= 0; n
< gen
->tile_len
; ++n
)
436 gen
->tile_size
[n
] = gen
->options
->tile_size
;
437 read_sizes_from_file(gen
, "tile.sizes", gen
->tile_size
, gen
->tile_len
);
440 gen
->n_block
= (n
<= 3) ? n
: 3;
441 switch (gen
->n_block
) {
443 gen
->block_dim
[0] = 512;
446 gen
->block_dim
[0] = 32;
447 gen
->block_dim
[1] = 16;
450 gen
->block_dim
[0] = 32;
451 gen
->block_dim
[1] = 4;
452 gen
->block_dim
[2] = 4;
455 read_sizes_from_file(gen
, "block.sizes", gen
->block_dim
, gen
->n_block
);
456 reverse_list(gen
->block_dim
, gen
->n_block
);
458 gen
->n_grid
= (n
<= 2) ? n
: 2;
459 switch (gen
->n_grid
) {
461 gen
->grid_dim
[0] = 65536;
464 gen
->grid_dim
[0] = 256;
465 gen
->grid_dim
[1] = 256;
468 read_sizes_from_file(gen
, "grid.sizes", gen
->grid_dim
, gen
->n_grid
);
469 reverse_list(gen
->grid_dim
, gen
->n_grid
);
472 static void free_stmts(struct cuda_stmt
*stmts
, int n
)
476 for (i
= 0; i
< n
; ++i
) {
477 struct cuda_stmt_access
*access
, *next
;
479 for (access
= stmts
[i
].accesses
; access
; access
= next
) {
481 isl_map_free(access
->access
);
485 isl_set_free(stmts
[i
].domain
);
491 void clear_cuda_gen(struct cuda_gen
*gen
)
493 free_stmts(gen
->stmts
, gen
->n_stmts
);
494 free_array_info(gen
);
495 isl_set_free(gen
->context
);
496 isl_union_set_free(gen
->copy_in
);
497 isl_union_map_free(gen
->sched
);
498 isl_union_map_free(gen
->read
);
499 isl_union_map_free(gen
->write
);
/* Print the "len" elements of "list" to "out" in reverse order
 * (list[len-1] down to list[0]) as decimal integers.
 * NOTE(review): extraction-garbled chunk; the lines between the loop
 * header and the fprintf (presumably a separator such as
 * `if (i) fprintf(out, ", ");` -- TODO confirm) and the closing braces
 * are missing from this view.  Code kept byte-identical; comments only.
 */
502 static void print_reverse_list(FILE *out
, int len
, int *list
)
506 for (i
= 0; i
< len
; ++i
) {
509 fprintf(out
, "%d", list
[len
- 1 - i
]);
513 static void print_kernel_launch(struct cuda_gen
*gen
,
514 __isl_keep isl_union_set
*arrays
)
521 print_indent(gen
->code
.dst
, gen
->code
.indent
);
522 fprintf(gen
->code
.dst
, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
523 gen
->kernel_id
, gen
->kernel_id
, gen
->kernel_id
);
524 fprintf(gen
->cuda
.kernel_c
, "__global__ void kernel%d(",
526 fprintf(gen
->cuda
.kernel_h
, "__global__ void kernel%d(",
529 for (i
= 0; i
< gen
->n_array
; ++i
) {
534 dim
= isl_dim_copy(gen
->array
[i
].dim
);
535 arr
= isl_union_set_extract_set(arrays
, dim
);
536 empty
= isl_set_fast_is_empty(arr
);
542 fprintf(gen
->code
.dst
, ", ");
543 fprintf(gen
->cuda
.kernel_c
, ", ");
544 fprintf(gen
->cuda
.kernel_h
, ", ");
547 fprintf(gen
->code
.dst
, "dev_%s", gen
->array
[i
].name
);
548 fprintf(gen
->cuda
.kernel_c
, "%s *%s",
549 gen
->options
->type
, gen
->array
[i
].name
);
550 fprintf(gen
->cuda
.kernel_h
, "%s *%s",
551 gen
->options
->type
, gen
->array
[i
].name
);
556 dim
= isl_union_set_get_dim(arrays
);
557 nparam
= isl_dim_size(dim
, isl_dim_param
);
558 for (i
= 0; i
< nparam
; ++i
) {
559 const char *name
= isl_dim_get_name(dim
, isl_dim_param
, i
);
561 fprintf(gen
->code
.dst
, ", ");
562 fprintf(gen
->cuda
.kernel_c
, ", ");
563 fprintf(gen
->cuda
.kernel_h
, ", ");
565 fprintf(gen
->code
.dst
, "%s", name
);
566 fprintf(gen
->cuda
.kernel_c
, "int %s", name
);
567 fprintf(gen
->cuda
.kernel_h
, "int %s", name
);
572 for (i
= 0; i
< gen
->tile_first
; ++i
) {
574 fprintf(gen
->code
.dst
, ", ");
575 fprintf(gen
->cuda
.kernel_c
, ", ");
576 fprintf(gen
->cuda
.kernel_h
, ", ");
578 fprintf(gen
->code
.dst
, "h%d", i
);
579 fprintf(gen
->cuda
.kernel_c
, "int h%d", i
);
580 fprintf(gen
->cuda
.kernel_h
, "int h%d", i
);
584 fprintf(gen
->code
.dst
, ");\n");
585 fprintf(gen
->cuda
.kernel_c
, ")\n");
586 fprintf(gen
->cuda
.kernel_h
, ");\n");
589 /* Construct a map from a domain of dimensionality "len"
590 * to a domain of dimensionality "len" + "tile_len" that tiles
591 * the "tile_len" coordinates starting at "first".
592 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
593 * "dim" prescribes the parameters.
595 static __isl_give isl_map
*tile(__isl_take isl_dim
*dim
, int len
,
596 int first
, int tile_len
, int *tile_size
)
605 dim
= isl_dim_add(dim
, isl_dim_in
, len
);
606 dim
= isl_dim_add(dim
, isl_dim_out
, len
+ tile_len
);
607 bmap
= isl_basic_map_universe(isl_dim_copy(dim
));
609 for (i
= 0; i
< len
- tile_len
; ++i
) {
610 int j
= i
< first
? i
: i
+ tile_len
;
611 int k
= i
< first
? i
: i
+ 2 * tile_len
;
613 c
= isl_equality_alloc(isl_dim_copy(dim
));
614 isl_int_set_si(v
, -1);
615 isl_constraint_set_coefficient(c
, isl_dim_in
, j
, v
);
616 isl_int_set_si(v
, 1);
617 isl_constraint_set_coefficient(c
, isl_dim_out
, k
, v
);
618 bmap
= isl_basic_map_add_constraint(bmap
, c
);
621 for (i
= 0; i
< tile_len
; ++i
) {
622 c
= isl_equality_alloc(isl_dim_copy(dim
));
623 isl_int_set_si(v
, -1);
624 isl_constraint_set_coefficient(c
, isl_dim_in
, first
+ i
, v
);
625 isl_int_set_si(v
, tile_size
[i
]);
626 isl_constraint_set_coefficient(c
, isl_dim_out
, first
+ i
, v
);
627 isl_int_set_si(v
, 1);
628 isl_constraint_set_coefficient(c
, isl_dim_out
,
629 first
+ i
+ tile_len
, v
);
630 bmap
= isl_basic_map_add_constraint(bmap
, c
);
632 c
= isl_inequality_alloc(isl_dim_copy(dim
));
633 isl_int_set_si(v
, 1);
634 isl_constraint_set_coefficient(c
, isl_dim_out
,
635 first
+ i
+ tile_len
, v
);
636 bmap
= isl_basic_map_add_constraint(bmap
, c
);
638 c
= isl_inequality_alloc(isl_dim_copy(dim
));
639 isl_int_set_si(v
, -1);
640 isl_constraint_set_coefficient(c
, isl_dim_out
,
641 first
+ i
+ tile_len
, v
);
642 isl_int_set_si(v
, tile_size
[i
] - 1);
643 isl_constraint_set_constant(c
, v
);
644 bmap
= isl_basic_map_add_constraint(bmap
, c
);
650 return isl_map_from_basic_map(bmap
);
653 /* Construct a map from a domain of dimensionality "len"
654 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
655 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
656 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
657 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
658 * that are projected out at the end.
659 * "dim" prescribes the parameters.
661 static __isl_give isl_map
*wrap(__isl_take isl_dim
*dim
, int len
,
662 int first
, int wrap_len
, int *wrap_size
)
668 dim
= isl_dim_add(dim
, isl_dim_in
, len
);
669 dim
= isl_dim_add(dim
, isl_dim_out
, len
+ 2 * wrap_len
);
670 bmap
= isl_basic_map_universe(isl_dim_copy(dim
));
672 for (i
= 0; i
< len
; ++i
) {
673 int k
= i
< first
+ wrap_len
? i
: i
+ 2 * wrap_len
;
675 c
= isl_equality_alloc(isl_dim_copy(dim
));
676 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, -1);
677 isl_constraint_set_coefficient_si(c
, isl_dim_out
, k
, 1);
678 bmap
= isl_basic_map_add_constraint(bmap
, c
);
681 for (i
= 0; i
< wrap_len
; ++i
) {
682 c
= isl_equality_alloc(isl_dim_copy(dim
));
683 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
685 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
686 first
+ wrap_len
+ i
, 1);
687 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
688 first
+ 2 * wrap_len
+ i
, wrap_size
[i
]);
689 bmap
= isl_basic_map_add_constraint(bmap
, c
);
691 c
= isl_inequality_alloc(isl_dim_copy(dim
));
692 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
693 first
+ wrap_len
+ i
, 1);
694 bmap
= isl_basic_map_add_constraint(bmap
, c
);
696 c
= isl_inequality_alloc(isl_dim_copy(dim
));
697 isl_constraint_set_coefficient_si(c
, isl_dim_out
,
698 first
+ wrap_len
+ i
, -1);
699 isl_constraint_set_constant_si(c
, wrap_size
[i
] - 1);
700 bmap
= isl_basic_map_add_constraint(bmap
, c
);
705 bmap
= isl_basic_map_project_out(bmap
, isl_dim_out
,
706 first
+ 2 * wrap_len
, wrap_len
);
708 return isl_map_from_basic_map(bmap
);
711 /* Add "n" parameters named prefix%d.
713 static __isl_give isl_set
*add_params( __isl_take isl_set
*set
,
714 int n
, const char *prefix
)
720 nparam
= isl_set_dim(set
, isl_dim_param
);
721 set
= isl_set_add_dims(set
, isl_dim_param
, n
);
723 for (i
= 0; i
< n
; ++i
) {
724 snprintf(name
, sizeof(name
), "%s%d", prefix
, i
);
725 set
= isl_set_set_dim_name(set
, isl_dim_param
,
732 /* Equate the "n" dimensions of "set" starting at "first" to
733 * freshly created parameters named prefix%d.
735 static __isl_give isl_set
*parametrize(__isl_take isl_set
*set
,
736 int first
, int n
, const char *prefix
)
745 nparam
= isl_set_dim(set
, isl_dim_param
);
747 set
= add_params(set
, n
, prefix
);
749 dim
= isl_set_get_dim(set
);
750 bset
= isl_basic_set_universe(isl_dim_copy(dim
));
754 for (i
= 0; i
< n
; ++i
) {
755 c
= isl_equality_alloc(isl_dim_copy(dim
));
756 isl_int_set_si(v
, -1);
757 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
758 isl_int_set_si(v
, 1);
759 isl_constraint_set_coefficient(c
, isl_dim_set
, first
+ i
, v
);
760 bset
= isl_basic_set_add_constraint(bset
, c
);
766 return isl_set_intersect(set
, isl_set_from_basic_set(bset
));
769 static __isl_give isl_set
*parametrization(__isl_take isl_dim
*dim
,
770 int len
, int first
, int n
, const char *prefix
)
774 dim
= isl_dim_add(dim
, isl_dim_set
, len
);
775 set
= isl_set_universe(dim
);
777 return parametrize(set
, first
, n
, prefix
);
780 /* Tile the B loops over the tile sizes and then tile/wrap
781 * the T1 loops over the blocks.
783 static __isl_give isl_union_map
*tile_schedule(struct cuda_gen
*gen
,
784 __isl_take isl_union_map
*sched
)
787 isl_map
*tiling
, *block_tiling
;
789 dim
= isl_union_map_get_dim(sched
);
790 tiling
= tile(isl_dim_copy(dim
), gen
->untiled_len
,
791 gen
->tile_first
, gen
->tile_len
, gen
->tile_size
);
793 if (gen
->options
->wrap
)
794 block_tiling
= wrap(dim
, gen
->untiled_len
+ gen
->tile_len
,
795 gen
->tile_first
, gen
->n_grid
, gen
->grid_dim
);
797 block_tiling
= tile(dim
, gen
->untiled_len
+ gen
->tile_len
,
798 gen
->tile_first
, gen
->n_grid
, gen
->grid_dim
);
800 gen
->tiled_len
= gen
->untiled_len
+ gen
->tile_len
+ gen
->n_grid
;
802 tiling
= isl_map_apply_range(tiling
, block_tiling
);
804 sched
= isl_union_map_apply_range(sched
,
805 isl_union_map_from_map(tiling
));
807 gen
->shared_len
= gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
;
812 static __isl_give isl_union_map
*parametrize_tiled_schedule(
813 struct cuda_gen
*gen
, __isl_take isl_union_map
*sched
)
818 dim
= isl_union_map_get_dim(sched
);
819 par
= parametrization(dim
, gen
->tiled_len
, 0, gen
->tile_first
, "h");
820 sched
= isl_union_map_intersect_range(sched
,
821 isl_union_set_from_set(par
));
823 dim
= isl_union_map_get_dim(sched
);
824 par
= parametrization(dim
, gen
->tiled_len
,
825 gen
->tile_first
+ gen
->n_grid
, gen
->n_grid
, "b");
826 sched
= isl_union_map_intersect_range(sched
,
827 isl_union_set_from_set(par
));
832 /* Tile/wrap the P1 loops over the threads.
834 static __isl_give isl_union_map
*thread_tile_schedule(struct cuda_gen
*gen
,
835 __isl_take isl_union_map
*sched
)
841 dim
= isl_union_map_get_dim(sched
);
843 if (gen
->options
->wrap
)
844 tiling
= wrap(isl_dim_copy(dim
), gen
->tiled_len
,
845 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
847 tiling
= tile(isl_dim_copy(dim
), gen
->tiled_len
,
848 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
849 gen
->thread_tiled_len
= gen
->tiled_len
+ gen
->n_block
;
851 sched
= isl_union_map_apply_range(sched
,
852 isl_union_map_from_map(tiling
));
854 par
= parametrization(dim
, gen
->thread_tiled_len
,
855 gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
+ gen
->n_block
,
857 sched
= isl_union_map_intersect_range(sched
,
858 isl_union_set_from_set(par
));
860 gen
->shared_len
= gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
;
865 /* If the user asked for it, scale the shared memory tile loops
866 * (T1P and T2) of "sched" by gen->tile_size[i].
867 * If we are not performing "wrapping", then additionally scale the T1P
868 * loops by gen->grid_dim[i].
870 static __isl_give isl_union_map
*scale_tile_loops(struct cuda_gen
*gen
,
871 __isl_take isl_union_map
*sched
)
875 isl_basic_map
*scale
;
878 if (!gen
->options
->scale_tile_loops
)
881 dim
= isl_union_map_get_dim(sched
);
882 dim
= isl_dim_add(dim
, isl_dim_in
, gen
->tiled_len
);
883 dim
= isl_dim_add(dim
, isl_dim_out
, gen
->tiled_len
);
884 scale
= isl_basic_map_universe(isl_dim_copy(dim
));
886 for (i
= 0; i
< gen
->tiled_len
; ++i
) {
889 if (i
>= gen
->tile_first
&& i
< gen
->tile_first
+ gen
->n_grid
) {
890 f
= gen
->tile_size
[i
- gen
->tile_first
];
891 if (!gen
->options
->wrap
)
892 f
*= gen
->grid_dim
[i
- gen
->tile_first
];
893 } else if (i
>= gen
->tile_first
+ gen
->n_grid
&&
894 i
< gen
->tile_first
+ gen
->n_grid
+ gen
->tile_len
) {
895 f
= gen
->tile_size
[i
- (gen
->tile_first
+ gen
->n_grid
)];
898 c
= isl_equality_alloc(isl_dim_copy(dim
));
899 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
900 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
901 scale
= isl_basic_map_add_constraint(scale
, c
);
906 sched
= isl_union_map_apply_range(sched
,
907 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
912 /* If we are not performing "wrapping" and if the user asked for it,
913 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
915 static __isl_give isl_union_map
*scale_thread_tile_loops(struct cuda_gen
*gen
,
916 __isl_take isl_union_map
*sched
)
920 isl_basic_map
*scale
;
923 if (gen
->options
->wrap
)
925 if (!gen
->options
->scale_tile_loops
)
928 dim
= isl_union_map_get_dim(sched
);
929 dim
= isl_dim_add(dim
, isl_dim_in
, gen
->thread_tiled_len
);
930 dim
= isl_dim_add(dim
, isl_dim_out
, gen
->thread_tiled_len
);
931 scale
= isl_basic_map_universe(isl_dim_copy(dim
));
933 for (i
= 0; i
< gen
->thread_tiled_len
; ++i
) {
936 if (i
>= gen
->shared_len
&&
937 i
< gen
->shared_len
+ gen
->n_block
)
938 f
= gen
->block_dim
[i
- gen
->shared_len
];
940 c
= isl_equality_alloc(isl_dim_copy(dim
));
941 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
942 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
943 scale
= isl_basic_map_add_constraint(scale
, c
);
948 sched
= isl_union_map_apply_range(sched
,
949 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
954 /* If we are not performing "wrapping" and if the user asked for it,
955 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
957 static __isl_give isl_union_map
*scale_access_tile_loops(struct cuda_gen
*gen
,
958 __isl_take isl_union_map
*sched
, int len
, int first
, int n_tile
)
962 isl_basic_map
*scale
;
965 if (gen
->options
->wrap
)
967 if (!gen
->options
->scale_tile_loops
)
970 dim
= isl_union_map_get_dim(sched
);
971 dim
= isl_dim_add(dim
, isl_dim_in
, len
);
972 dim
= isl_dim_add(dim
, isl_dim_out
, len
);
973 scale
= isl_basic_map_universe(isl_dim_copy(dim
));
975 for (i
= 0; i
< len
; ++i
) {
978 if (i
>= first
&& i
< first
+ n_tile
)
979 f
= gen
->block_dim
[i
- first
];
981 c
= isl_equality_alloc(isl_dim_copy(dim
));
982 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, f
);
983 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
984 scale
= isl_basic_map_add_constraint(scale
, c
);
989 sched
= isl_union_map_apply_range(sched
,
990 isl_union_map_from_map(isl_map_from_basic_map(scale
)));
995 /* If print_user_stmt is set, we want to print the statements ourselves,
996 * instead of relying on the C preprocessor. If so, we need to use
997 * the stop option so that the domains will be saved on the statement
1000 static void print_cloog_shared_body(struct cuda_gen
*gen
,
1001 __isl_keep isl_set
*context
, __isl_keep isl_union_map
*sched
, int len
,
1002 void (*print_user_stmt
)(struct gpucode_info
*info
,
1003 struct clast_user_stmt
*s
),
1007 CloogOptions
*options
;
1008 CloogDomain
*cloog_context
;
1009 CloogUnionDomain
*ud
;
1011 struct clast_stmt
*stmt
;
1014 sched
= isl_union_map_copy(sched
);
1015 sched
= isl_union_map_align_params(sched
, isl_set_get_dim(context
));
1017 options
= cloog_options_malloc(gen
->state
);
1018 options
->language
= LANGUAGE_C
;
1019 options
->strides
= 1;
1023 options
->override
= 1;
1024 options
->save_domains
= 1;
1025 options
->noscalars
= 1;
1026 options
->first_unroll
= first_unroll
;
1028 ud
= cloog_union_domain_from_isl_union_map(sched
);
1029 for (i
= 0; i
< len
; ++i
) {
1030 snprintf(name
, sizeof(name
), "c%d", i
);
1031 ud
= cloog_union_domain_set_name(ud
, CLOOG_SCAT
, i
, name
);
1033 cloog_context
= cloog_domain_from_isl_set(isl_set_copy(context
));
1034 input
= cloog_input_alloc(cloog_context
, ud
);
1036 stmt
= cloog_clast_create_from_input(input
, options
);
1038 gen
->stmt_code
.indent
= gen
->kernel_code
.indent
;
1039 gen
->stmt_code
.dst
= gen
->cuda
.kernel_c
;
1040 gen
->stmt_code
.print_user_stmt
= print_user_stmt
;
1041 gen
->stmt_code
.print_user_stmt_list
= NULL
;
1042 gen
->stmt_code
.print_for_head
= NULL
;
1043 gen
->stmt_code
.print_for_foot
= NULL
;
1044 gen
->stmt_code
.user
= gen
;
1045 gpu_print_host_stmt(&gen
->stmt_code
, stmt
);
1047 cloog_clast_free(stmt
);
1048 cloog_options_free(options
);
1051 /* Add "len" parameters p[i] called prefix%d,
1052 * with bounds to 0 <= p[i] < size[i].
1054 __isl_give isl_set
*add_bounded_parameters(__isl_take isl_set
*set
,
1055 int len
, int *size
, const char *prefix
)
1061 isl_basic_set
*bset
;
1065 nparam
= isl_set_dim(set
, isl_dim_param
);
1066 set
= isl_set_add_dims(set
, isl_dim_param
, len
);
1068 for (i
= 0; i
< len
; ++i
) {
1069 snprintf(name
, sizeof(name
), "%s%d", prefix
, i
);
1070 set
= isl_set_set_dim_name(set
, isl_dim_param
,
1074 dim
= isl_set_get_dim(set
);
1075 bset
= isl_basic_set_universe(isl_dim_copy(dim
));
1079 for (i
= 0; i
< len
; ++i
) {
1080 c
= isl_inequality_alloc(isl_dim_copy(dim
));
1081 isl_int_set_si(v
, 1);
1082 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
1083 bset
= isl_basic_set_add_constraint(bset
, c
);
1085 c
= isl_inequality_alloc(isl_dim_copy(dim
));
1086 isl_int_set_si(v
, -1);
1087 isl_constraint_set_coefficient(c
, isl_dim_param
, nparam
+ i
, v
);
1088 isl_int_set_si(v
, size
[i
] - 1);
1089 isl_constraint_set_constant(c
, v
);
1090 bset
= isl_basic_set_add_constraint(bset
, c
);
1096 return isl_set_intersect(set
, isl_set_from_basic_set(bset
));
1099 static void print_shared_body(struct cuda_gen
*gen
,
1100 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*sched
,
1101 int len
, void (*print_user_stmt
)(struct gpucode_info
*info
,
1102 struct clast_user_stmt
*s
),
1107 context
= isl_set_copy(shared_domain
);
1108 context
= parametrize(context
, 0, gen
->shared_len
, "g");
1109 context
= isl_set_project_out(context
, isl_dim_set
, 0, gen
->shared_len
);
1110 context
= add_bounded_parameters(context
,
1111 gen
->n_block
, gen
->block_dim
, "t");
1113 print_cloog_shared_body(gen
, context
, sched
, len
, print_user_stmt
,
1116 isl_set_free(context
);
1119 /* Given a tile of an array, construct a map that maps each element
1120 * of the tile to a copy of the tile shifted to the origin
1121 * (based on the lower bounds in group->private_bound or group->shared_bound).
1122 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
1123 * is applied to the index first.
1124 * The domain of the resulting map is "access",
1125 * while the range space is anonymous.
1127 static __isl_give isl_map
*shift_access(__isl_take isl_set
*access
,
1128 struct cuda_array_ref_group
*group
)
1132 isl_basic_set
*bset
;
1133 isl_basic_map
*bmap
;
1134 isl_qpolynomial
*lb
;
1135 isl_basic_set
*offset
;
1136 isl_basic_map
*shift
;
1137 isl_basic_map
*pre_shift
;
1140 struct cuda_array_bound
*bounds
;
1141 int n_index
= group
->array
->n_index
;
1143 bounds
= group
->private_bound
;
1145 bounds
= group
->shared_bound
;
1147 dim
= isl_set_get_dim(access
);
1148 dim
= isl_dim_drop(dim
, isl_dim_set
, 0, n_index
);
1149 offset
= isl_basic_set_universe(dim
);
1150 for (i
= 0; i
< n_index
; ++i
) {
1151 lb
= isl_qpolynomial_copy(bounds
[i
].lb
);
1152 bmap
= isl_basic_map_from_qpolynomial(lb
);
1153 bset
= isl_basic_map_range(bmap
);
1154 offset
= isl_basic_set_flat_product(offset
, bset
);
1156 offset
= isl_basic_set_neg(offset
);
1158 dim
= isl_dim_map_from_set(isl_set_get_dim(access
));
1159 shift
= isl_basic_map_identity(dim
);
1160 shift
= isl_basic_map_set_tuple_name(shift
, isl_dim_out
, NULL
);
1162 bset
= isl_basic_set_universe(isl_set_get_dim(access
));
1163 bmap
= isl_basic_map_from_domain_and_range(bset
, offset
);
1165 shift
= isl_basic_map_sum(shift
, bmap
);
1167 dim
= isl_set_get_dim(access
);
1168 dim
= isl_dim_drop(dim
, isl_dim_set
, 0, n_index
);
1169 dim
= isl_dim_map_from_set(dim
);
1170 pre_shift
= isl_basic_map_universe(isl_dim_copy(dim
));
1171 dim
= isl_dim_add(dim
, isl_dim_in
, 1);
1172 dim
= isl_dim_add(dim
, isl_dim_out
, 1);
1173 for (i
= 0; i
< n_index
; ++i
) {
1174 if (!bounds
[i
].shift_map
)
1175 bmap
= isl_basic_map_identity(isl_dim_copy(dim
));
1177 bmap
= isl_basic_map_copy(bounds
[i
].shift_map
);
1178 pre_shift
= isl_basic_map_flat_product(pre_shift
, bmap
);
1181 name
= isl_basic_map_get_tuple_name(shift
, isl_dim_in
);
1182 pre_shift
= isl_basic_map_set_tuple_name(pre_shift
, isl_dim_in
, name
);
1183 pre_shift
= isl_basic_map_set_tuple_name(pre_shift
, isl_dim_out
, name
);
1184 shift
= isl_basic_map_apply_range(pre_shift
, shift
);
1186 sched
= isl_map_from_basic_map(shift
);
1187 sched
= isl_map_intersect_domain(sched
, access
);
1192 /* Construct a schedule for iterating over all elements in the given
1193 * piece of an array. The schedule iterates over a copy of the piece
1194 * that is shifted to the origin.
1195 * We subsequently also perform the tiling/wrapping over the threads.
1197 * In particular, we tile the final iterators so that the final thread
1198 * dimension runs over the final array dimension.
1199 * However, if those final iterators have only a single iteration,
1200 * we try to tile earlier iterators instead.
/* NOTE(review): this listing is a lossy extraction — declaration lines,
 * else-branches, closing braces and the return statement of this function
 * appear to have been dropped.  Verify against the original file. */
1202 static __isl_give isl_union_map
*access_schedule(struct cuda_gen
*gen
,
1203 __isl_take isl_set
*access
, struct cuda_array_ref_group
*group
)
1207 isl_union_map
*usched
;
1210 unsigned nvar
= isl_set_dim(access
, isl_dim_set
);
1214 sched
= shift_access(access
, group
);
/* One tile dimension per thread dimension of the block. */
1216 n_tile
= gen
->n_block
;
/* Fewer array dimensions than thread dimensions: pad the schedule
 * with leading output dimensions fixed to zero. */
1217 if (n_tile
> nvar
) {
1219 sched
= isl_map_insert(sched
, isl_dim_out
, 0, n_tile
- nvar
);
1220 for (i
= 0; i
< n_tile
- nvar
; ++i
)
1221 sched
= isl_map_fix_si(sched
, isl_dim_out
, i
, 0);
1225 first
= nvar
- n_tile
;
/* Move "first" back past trailing dimensions that are fixed to a
 * single value, so that dimensions with real iteration get tiled
 * (see the header comment above). */
1227 for (; first
> 0; first
--)
1228 if (!isl_map_plain_is_fixed(sched
, isl_dim_out
,
1229 first
+ n_tile
- 1, NULL
))
1232 dim
= isl_map_get_dim(sched
);
1233 dim
= isl_dim_drop(dim
, isl_dim_in
, 0, isl_dim_size(dim
, isl_dim_in
));
1234 dim
= isl_dim_drop(dim
, isl_dim_out
, 0, nvar
);
/* Wrap or tile the selected dimensions over the thread block,
 * depending on the user option. */
1235 if (gen
->options
->wrap
)
1236 tiling
= wrap(isl_dim_copy(dim
), nvar
, first
,
1237 n_tile
, gen
->block_dim
);
1239 tiling
= tile(isl_dim_copy(dim
), nvar
, first
,
1240 n_tile
, gen
->block_dim
);
1241 sched
= isl_map_apply_range(sched
, tiling
);
/* Equate the tile loops to the "t" thread identifiers. */
1243 par
= parametrization(dim
, nvar
+ n_tile
, first
+ n_tile
, n_tile
, "t");
1244 usched
= isl_union_map_from_map(sched
);
1245 usched
= isl_union_map_intersect_range(usched
,
1246 isl_union_set_from_set(par
));
1248 usched
= scale_access_tile_loops(gen
, usched
, nvar
+ n_tile
,
/* Print code that copies the elements of "access" between global memory
 * and the shared memory copy of group's array.
 * The generated copy statement is named <type>_shared_<array> with a
 * _<nr> suffix appended when the array has more than one reference group.
 * NOTE(review): lossy extraction — an "else" between the two sprintf
 * calls and the free of "name" appear to have been dropped. */
1254 static void print_shared_access(struct cuda_gen
*gen
,
1255 __isl_keep isl_set
*shared_domain
, __isl_take isl_set
*access
,
1256 const char *type
, struct cuda_array_ref_group
*group
)
1258 const char *array_name
;
1261 isl_union_map
*sched
;
1262 unsigned nvar
= isl_set_dim(access
, isl_dim_set
);
1265 ctx
= isl_set_get_ctx(access
);
1266 array_name
= isl_set_get_tuple_name(access
);
/* +20 leaves room for the decimal group number. */
1267 name
= isl_alloc_array(ctx
, char,
1268 strlen(type
) + sizeof("_shared_") + strlen(array_name
) + 20);
1269 if (group
->array
->n_group
> 1)
1270 sprintf(name
, "%s_shared_%s_%d", type
, array_name
, group
->nr
);
1272 sprintf(name
, "%s_shared_%s", type
, array_name
);
1273 access
= isl_set_set_tuple_name(access
, name
);
1276 sched
= access_schedule(gen
, access
, group
);
1278 n_tile
= gen
->n_block
;
1282 print_shared_body(gen
, shared_domain
, sched
, nvar
+ n_tile
, NULL
, -1);
1284 isl_union_map_free(sched
);
1287 /* Return the union of all read (read = 1) and/or write (write = 1)
1288 * access relations in the group.
1290 static __isl_give isl_union_map
*group_access_relation(
1291 struct cuda_array_ref_group
*group
, int read
, int write
)
1294 isl_union_map
*access
;
/* Start from an empty union in the space of the group's access relation. */
1296 access
= isl_union_map_empty(isl_map_get_dim(group
->access
));
1297 for (i
= 0; i
< group
->n_ref
; ++i
) {
/* Skip references of a kind the caller did not ask for.
 * NOTE(review): the "continue" that should follow this condition
 * appears to have been dropped by the extraction. */
1300 if (!((read
&& group
->refs
[i
]->read
) ||
1301 (write
&& group
->refs
[i
]->write
)))
1303 map_i
= isl_map_copy(group
->refs
[i
]->access
);
1304 access
= isl_union_map_union(access
,
1305 isl_union_map_from_map(map_i
));
1311 /* Print code for reading into or writing from shared memory
1312 * the given array reference group.
1314 * sched maps the original iteration domains to the shared memory tile loops.
/* The int return value is checked by the caller (print_shared_accesses);
 * the return statements themselves are not visible in this extraction.
 * Groups mapped to private memory, or without a shared-memory tile,
 * are skipped. */
1316 static int print_group_shared_accesses(struct cuda_gen
*gen
,
1317 struct cuda_array_ref_group
*group
, const char *type
,
1318 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*sched
)
1321 isl_union_map
*access
;
1322 isl_union_set
*uset
;
1323 isl_set
*access_set
;
1325 if (group
->private_bound
)
1327 if (!group
->shared_bound
)
/* "read" selects reads; otherwise writes (read and !read below). */
1330 read
= !strcmp(type
, "read");
1332 access
= group_access_relation(group
, read
, !read
)
;
/* Map the accessed elements to the shared tile loop iterations. */
1333 access
= isl_union_map_apply_domain(access
, isl_union_map_copy(sched
));
1334 uset
= isl_union_map_range(access
);
1336 if (isl_union_set_is_empty(uset
)) {
1337 isl_union_set_free(uset
);
1341 access_set
= isl_union_set_copy_set(uset
);
1342 isl_union_set_free(uset
);
1343 access_set
= isl_set_coalesce(access_set
);
1345 print_shared_access(gen
, shared_domain
, access_set
, type
, group
);
1350 /* Print code for reading into or writing from shared memory at
1351 * the given level (-1 for innermost).
1353 * If we are not printing at the innermost level, then the dimensionality
1354 * of shared_domain may be smaller than gen->shared_len.
1355 * As the rest of the code assumes that the domain of access has
1356 * gen->shared_len dimensions, we therefore may need to embed this domain
1357 * in a higher dimensional space after intersection with shared_domain.
1359 static void print_shared_accesses(struct cuda_gen
*gen
,
1360 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*access
,
1361 const char *type
, int level
)
1367 int shared_len
= isl_set_dim(shared_domain
, isl_dim_set
);
1369 isl_union_map
*sched
;
1371 shared_domain
= isl_set_copy(shared_domain
);
/* Project the tiled schedule onto the shared tile loops and restrict
 * it to the current position "shared_domain". */
1372 sched
= isl_union_map_copy(gen
->tiled_sched
);
1373 dim
= isl_union_map_get_dim(sched
);
1374 proj
= projection(dim
, gen
->tiled_len
, shared_len
);
1375 sched
= isl_union_map_apply_range(sched
, isl_union_map_from_map(proj
));
1376 sched
= isl_union_map_intersect_range(sched
,
1377 isl_union_set_from_set(isl_set_copy(shared_domain
)));
/* Embed in a gen->shared_len-dimensional space when printing at an
 * outer level (see the header comment above). */
1378 if (shared_len
!= gen
->shared_len
) {
1379 dim
= isl_union_map_get_dim(sched
);
1380 proj
= projection(dim
, gen
->shared_len
, shared_len
);
1381 proj
= isl_map_reverse(proj
);
1382 shared_domain
= isl_set_apply(shared_domain
,
1383 isl_map_copy(proj
));
1384 sched
= isl_union_map_apply_range(sched
,
1385 isl_union_map_from_map(proj
));
/* Equate the shared tile loops to the "g" parameters. */
1388 dim
= isl_union_map_get_dim(sched
);
1389 par
= parametrization(dim
, gen
->shared_len
, 0, gen
->shared_len
, "g");
1390 sched
= isl_union_map_intersect_range(sched
,
1391 isl_union_set_from_set(par
));
1393 for (i
= 0; i
< gen
->n_array
; ++i
) {
1394 struct cuda_array_info
*array
= &gen
->array
[i
];
/* Only handle arrays scheduled for copying at this level. */
1396 if (gen
->array
[i
].print_shared_level
!= level
)
1399 for (j
= 0; j
< array
->n_group
; ++j
) {
1400 if (print_group_shared_accesses(gen
, array
->groups
[j
],
1401 type
, shared_domain
, sched
))
1406 isl_union_map_free(sched
);
1407 isl_set_free(shared_domain
);
/* Barrier so all threads see the copied data before/after use.
 * NOTE(review): the original presumably only prints this when
 * something was copied — the guarding code is not visible here. */
1410 print_indent(gen
->cuda
.kernel_c
, gen
->kernel_code
.indent
);
1411 fprintf(gen
->cuda
.kernel_c
, "__syncthreads();\n");
1415 /* Given an index expression into a tile of an array, adjust the expression
1416 * to a shift of the tile to the origin
1417 * (based on the lower bounds in array->shared_bound).
1418 * If the index is strided, then we first add
1419 * bound->shift and divide by bound->stride.
/* Takes ownership of "qp" and "domain"; returns the adjusted expression.
 * NOTE(review): lossy extraction — the guards around the stride handling
 * (presumably testing bound->shift/bound->stride) and the isl_int
 * init/clear for "one" are not visible here. */
1421 static __isl_give isl_qpolynomial
*shift_index(__isl_take isl_qpolynomial
*qp
,
1422 struct cuda_array_info
*array
,
1423 struct cuda_array_bound
*bound
, __isl_take isl_set
*domain
)
1425 isl_qpolynomial
*lb
;
1428 isl_qpolynomial
*shift
, *t
;
/* (qp + bound->shift) / bound->stride */
1431 shift
= bound
->shift
;
1432 shift
= isl_qpolynomial_copy(shift
);
1433 shift
= isl_qpolynomial_drop_dims(shift
, isl_dim_set
, 0,
1434 isl_qpolynomial_dim(shift
, isl_dim_set
));
1435 shift
= isl_qpolynomial_align_params(shift
,
1436 isl_qpolynomial_get_dim(qp
));
1437 qp
= isl_qpolynomial_add(qp
, shift
);
1438 dim
= isl_qpolynomial_get_dim(qp
);
/* Multiply by the rational constant 1/stride. */
1440 isl_int_set_si(one
, 1);
1441 t
= isl_qpolynomial_rat_cst(dim
, one
, bound
->stride
);
1443 qp
= isl_qpolynomial_mul(qp
, t
);
/* Subtract the tile's lower bound to shift the tile to the origin. */
1446 lb
= isl_qpolynomial_copy(bound
->lb
);
1447 lb
= isl_qpolynomial_drop_dims(lb
, isl_dim_set
, 0,
1448 isl_qpolynomial_dim(lb
, isl_dim_set
));
1450 lb
= isl_qpolynomial_align_params(lb
, isl_qpolynomial_get_dim(qp
));
1452 qp
= isl_qpolynomial_sub(qp
, lb
);
/* Simplify in the context of the iteration domain. */
1453 qp
= isl_qpolynomial_gist(qp
, domain
);
1458 /* This function is called for each access to an array in some statement
1459 * in the original code.
1460 * Replace that access by an access to shared or (linearized) global memory.
1461 * Since the array in shared memory is just
1462 * a shifted copy of part of the original array, we simply need
1463 * to subtract the lower bound, which was computed
1464 * in can_tile_for_shared_memory.
1465 * If any of the indices is strided, then we first add
1466 * shared_bound[i].shift and divide by shared_bound[i].stride.
1468 * If the given array is accessed directly from global memory,
1469 * we don't need to perform any shifting and simply simplify
1470 * expression in the context of the domain instead.
1472 * If the array space (range of access) has no name, then we are
1473 * accessing an iterator in the original program.
/* NOTE(review): lossy extraction — parameter list is truncated, branch
 * structure (global vs. shared/private, named vs. unnamed tuple) and
 * several closing braces are not visible.  Do not edit without the
 * original file. */
1475 static void print_access(struct cuda_gen
*gen
, __isl_take isl_map
*access
,
1481 struct cuda_array_info
*array
= NULL
;
1486 struct cuda_array_bound
*bounds
= NULL
;
1488 access
= isl_map_align_params(access
,
1489 isl_set_get_dim(gen
->stmt_domain
));
/* Elements accessed at the current statement instance. */
1491 data_set
= isl_set_apply(isl_set_copy(gen
->stmt_domain
), access
);
1493 name
= isl_set_get_tuple_name(data_set
);
1496 fprintf(gen
->cuda
.kernel_c
, "(");
1498 struct cuda_array_ref_group
*group
;
/* Look up the accessed array by its tuple name. */
1500 for (i
= 0; i
< gen
->n_array
; ++i
) {
1501 if (strcmp(name
, gen
->array
[i
].name
))
1503 array
= &gen
->array
[i
];
1506 group
= array
->groups
[group_nr
];
/* Private memory takes precedence over shared memory. */
1507 bounds
= group
->private_bound
;
1509 bounds
= group
->shared_bound
;
1511 print_array_name(gen
->cuda
.kernel_c
, group
);
1512 fprintf(gen
->cuda
.kernel_c
, "[");
1516 n_index
= isl_set_dim(data_set
, isl_dim_set
);
/* The affine hull provides a defining equality per index. */
1517 aff
= isl_set_affine_hull(data_set
);
1519 prn
= isl_printer_to_file(gen
->ctx
, gen
->cuda
.kernel_c
);
1520 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
/* Open parentheses for the linearized (Horner-style) global index. */
1523 for (i
= 0; i
+ 1 < n_index
; ++i
)
1524 prn
= isl_printer_print_str(prn
, "(");
1526 for (i
= 0; i
< n_index
; ++i
) {
1528 isl_qpolynomial
*qp
;
1531 ok
= isl_basic_set_has_defining_equality(aff
,
1532 isl_dim_out
, i
, &c
);
1534 qp
= isl_qpolynomial_from_constraint(c
, isl_dim_out
, i
);
1535 qp
= isl_qpolynomial_drop_dims(qp
, isl_dim_set
, 0,
1536 isl_qpolynomial_dim(qp
, isl_dim_set
));
1539 prn
= isl_printer_print_qpolynomial(prn
, qp
);
1540 isl_qpolynomial_free(qp
);
/* Simplify the index in the context of the (parametrized) domain. */
1544 domain
= isl_set_copy(gen
->stmt_domain
);
1545 domain
= isl_set_project_out(domain
, isl_dim_set
, 0,
1546 isl_set_dim(domain
, isl_dim_set
));
1548 qp
= isl_qpolynomial_gist(qp
, domain
);
/* Shared/private copy: shift the index to the tile origin. */
1550 qp
= shift_index(qp
, array
, &bounds
[i
], domain
);
/* Linearized global access: ... ) * (size_i) + index_i ... */
1554 prn
= isl_printer_print_str(prn
, ") * (");
1555 prn
= isl_printer_print_pw_qpolynomial_fold(prn
,
1556 array
->local_bound
[i
]);
1557 prn
= isl_printer_print_str(prn
, ") + ");
/* Shared/private copy keeps one [] pair per dimension. */
1559 prn
= isl_printer_print_str(prn
, "][");
1561 prn
= isl_printer_print_qpolynomial(prn
, qp
);
1562 isl_qpolynomial_free(qp
);
1565 prn
= isl_printer_print_str(prn
, ")");
1567 prn
= isl_printer_print_str(prn
, "]");
1568 isl_printer_free(prn
);
1570 isl_basic_set_free(aff
);
/* Print the body of statement "stmt" to "out", replacing each array
 * access in the original text by a rewritten access (print_access).
 * The text between accesses is copied through verbatim using the
 * recorded text_offset/text_len of each access. */
1573 static void print_stmt_body(struct cuda_gen
*gen
,
1574 FILE *out
, struct cuda_stmt
*stmt
)
1577 struct cuda_stmt_access
*access
;
1579 for (access
= stmt
->accesses
; access
; access
= access
->next
) {
/* Copy the original text up to the start of this access. */
1580 fwrite(stmt
->text
+ last
, 1, access
->text_offset
- last
, out
);
1581 last
= access
->text_offset
+ access
->text_len
;
1583 print_access(gen
, isl_map_copy(access
->access
),
/* Print the remaining text after the last access. */
1587 fprintf(out
, "%s\n", stmt
->text
+ last
);
1590 /* This function is called for each leaf in the innermost clast,
1591 * i.e., for each statement.
1592 * We print the statement body, simplifying the accesses based
1595 static void print_statement(struct gpucode_info
*code
,
1596 struct clast_user_stmt
*u
)
1598 struct cuda_gen
*gen
= code
->user
;
1601 isl_set
*stmt_domain
;
1602 isl_union_map
*stmt_sched
;
1603 isl_union_set
*uset
;
1605 struct cuda_stmt
*stmt
;
/* The statement index is parsed from the clast statement name,
 * skipping a two-character prefix (presumably "S_" — confirm). */
1607 nr
= atoi(u
->statement
->name
+ 2);
1608 stmt
= &gen
->stmts
[nr
];
1610 stmt_domain
= extract_host_domain(u
);
/* Restrict the local schedule to the current leaf's domain, extended
 * to the full thread-tiled length. */
1612 stmt_sched
= isl_union_map_intersect_range(
1613 isl_union_map_copy(gen
->local_sched
),
1614 isl_union_set_from_set(extend(stmt_domain
,
1615 gen
->thread_tiled_len
)));
/* Equate the schedule dimensions to the "c" parameters of the
 * current scheduling position. */
1616 dim
= isl_union_map_get_dim(stmt_sched
);
1617 par
= parametrization(dim
, gen
->thread_tiled_len
, 0,
1618 gen
->thread_tiled_len
, "c");
1619 stmt_sched
= isl_union_map_intersect_range(stmt_sched
,
1620 isl_union_set_from_set(par
));
/* Extract the (parametrized) iteration domain of this statement. */
1622 uset
= isl_union_map_domain(stmt_sched
);
1623 dim
= isl_union_set_get_dim(uset
);
1624 dim
= isl_dim_add(dim
, isl_dim_set
,
1625 isl_set_dim(stmt
->domain
, isl_dim_set
));
1626 dim
= isl_dim_set_tuple_name(dim
, isl_dim_set
, u
->statement
->name
);
1627 gen
->stmt_domain
= isl_union_set_extract_set(uset
, dim
);
1628 isl_union_set_free(uset
);
1630 print_indent(code
->dst
, code
->indent
);
1631 print_stmt_body(gen
, code
->dst
, stmt
);
1633 isl_set_free(gen
->stmt_domain
);
1636 /* Print an access to the element in the global memory copy of the
1637 * given array that corresponds to element [qp[0]][qp[1]]...
1638 * of the original array.
1639 * The copy in global memory has been linearized, so we need to take
1640 * the array size into account.
1642 static void print_private_global_index(isl_ctx
*ctx
, FILE *out
,
1643 struct cuda_array_info
*array
, __isl_keep isl_qpolynomial
**qp
)
1648 fprintf(out
, "%s[", array
->name
);
1649 prn
= isl_printer_to_file(ctx
, out
);
1650 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
/* Linearize Horner-style: (..(i0)*n1 + i1)*n2 + i2 ... */
1651 for (i
= 0; i
+ 1 < array
->n_index
; ++i
)
1652 prn
= isl_printer_print_str(prn
, "(");
1653 for (i
= 0; i
< array
->n_index
; ++i
) {
1655 prn
= isl_printer_print_str(prn
, ") * (");
1656 prn
= isl_printer_print_pw_qpolynomial_fold(prn
,
1657 array
->local_bound
[i
]);
1658 prn
= isl_printer_print_str(prn
, ") + ");
1660 prn
= isl_printer_print_qpolynomial(prn
, qp
[i
]);
1662 isl_printer_free(prn
);
1666 /* Print an access to the element in the shared memory copy of the
1667 * given array reference group that corresponds to element [qps[0]][qps[1]]...
1668 * of the original array.
1669 * Since the array in shared memory is just a shifted copy of part
1670 * of the original array, we simply need to subtract the lower bound,
1671 * which was computed in can_tile_for_shared_memory.
1672 * If any of the indices is strided, then we first add
1673 * shared_bound[i].shift and divide by shared_bound[i].stride.
/* NOTE(review): despite the comment above, this helper reads
 * group->private_bound, i.e. the private-memory tile bounds. */
1675 static void print_private_local_index(isl_ctx
*ctx
, FILE *out
,
1676 struct cuda_array_ref_group
*group
,
1677 __isl_keep isl_qpolynomial
**qps
, __isl_keep isl_set
*domain
)
1681 struct cuda_array_info
*array
= group
->array
;
1682 struct cuda_array_bound
*bounds
= group
->private_bound
;
1684 print_array_name(out
, group
);
1685 for (i
= 0; i
< array
->n_index
; ++i
) {
1686 isl_qpolynomial
*qp
= isl_qpolynomial_copy(qps
[i
]);
/* Shift the index to the origin of the tile (and handle strides). */
1688 qp
= shift_index(qp
, array
, &bounds
[i
], isl_set_copy(domain
));
1691 prn
= isl_printer_to_file(ctx
, out
);
1692 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
1693 prn
= isl_printer_print_qpolynomial(prn
, qp
);
1694 isl_printer_free(prn
);
1696 isl_qpolynomial_free(qp
);
1700 /* This function is called for each leaf in the clast of the code
1701 * for copying to or from private memory.
1702 * The statement name is read_private_<array> or write_private_<array>.
1704 * The schedule iterates over the array elements, so we can use
1705 * the domain of private_sched at the current scheduling position
1706 * as the index of the array.
/* NOTE(review): lossy extraction — several declarations, the branch
 * structure around the read/write printing (an if/else on "read" is
 * implied by the duplicated print calls below) and the free of "qp"
 * itself are not visible. */
1708 static void print_private_copy_statement(struct gpucode_info
*code
,
1709 struct clast_user_stmt
*u
)
1711 struct cuda_gen
*gen
= code
->user
;
1714 struct cuda_array_ref_group
*group
= gen
->private_group
;
1723 isl_qpolynomial
**qp
;
/* Direction of the copy is encoded in the statement name prefix. */
1726 read
= !strncmp(u
->statement
->name
, "read", 4);
1728 domain
= extract_host_domain(u
);
/* Invert the private schedule and restrict it to the current
 * scheduling position to obtain the accessed array elements. */
1731 sched
= isl_map_copy(gen
->private_sched
);
1732 sched
= isl_map_reverse(sched
);
1733 sched
= isl_map_intersect_domain(sched
, domain
);
1734 n_in
= isl_map_dim(sched
, isl_dim_in
);
1735 n_out
= isl_map_dim(sched
, isl_dim_out
);
1736 dim
= isl_map_get_dim(sched
);
1737 dim
= isl_dim_drop(dim
, isl_dim_in
, 0, n_in
);
1738 dim
= isl_dim_drop(dim
, isl_dim_out
, 0, n_out
);
/* Equate the scheduling dimensions to the "c" parameters. */
1739 param
= parametrization(dim
, n_in
, 0, n_in
, "c");
1740 sched
= isl_map_align_params(sched
, isl_set_get_dim(param
));
1741 sched
= isl_map_intersect_domain(sched
, param
);
1742 index
= isl_map_range(sched
);
1743 domain
= isl_set_copy(index
);
1744 aff
= isl_set_affine_hull(index
);
1745 domain
= isl_set_project_out(domain
, isl_dim_set
, 0, n_out
);
1747 ctx
= isl_basic_set_get_ctx(aff
);
1748 qp
= isl_alloc_array(ctx
, isl_qpolynomial
*, n_out
);
/* Derive one index expression per array dimension from the
 * defining equalities of the affine hull. */
1751 for (i
= 0; i
< n_out
; ++i
) {
1755 ok
= isl_basic_set_has_defining_equality(aff
,
1756 isl_dim_set
, i
, &c
);
1758 qp
[i
] = isl_qpolynomial_from_constraint(c
, isl_dim_set
, i
);
1759 qp
[i
] = isl_qpolynomial_drop_dims(qp
[i
], isl_dim_set
, 0, n_out
);
1762 print_indent(code
->dst
, code
->indent
);
/* read:  private_copy = global_copy; */
1764 print_private_local_index(ctx
, code
->dst
, group
, qp
, domain
);
1765 fprintf(code
->dst
, " = ");
1766 print_private_global_index(ctx
, code
->dst
, group
->array
, qp
);
/* write: global_copy = private_copy; */
1768 print_private_global_index(ctx
, code
->dst
, group
->array
, qp
);
1769 fprintf(code
->dst
, " = ");
1770 print_private_local_index(ctx
, code
->dst
, group
, qp
, domain
);
1772 fprintf(code
->dst
, ";\n");
1774 for (i
= 0; i
< n_out
; ++i
)
1775 isl_qpolynomial_free(qp
[i
]);
1778 isl_basic_set_free(aff
);
1779 isl_set_free(domain
);
/* Print code that copies the elements of "access" between global memory
 * and the private (register) copy of group's array.
 * The copy statement is named <type>_private_<array>[_<nr>]; the actual
 * body is emitted through print_private_copy_statement via
 * gen->private_sched/gen->private_group.
 * NOTE(review): an "else" between the two sprintf calls and the free
 * of "name" appear to have been dropped by the extraction. */
1782 static void print_private_access(struct cuda_gen
*gen
,
1783 __isl_keep isl_set
*shared_domain
, __isl_take isl_set
*access
,
1784 const char *type
, struct cuda_array_ref_group
*group
)
1786 const char *array_name
;
1789 unsigned nvar
= isl_set_dim(access
, isl_dim_set
);
1790 isl_union_map
*usched
;
/* Nothing to copy. */
1792 if (isl_set_fast_is_empty(access
)) {
1793 isl_set_free(access
);
1797 ctx
= isl_set_get_ctx(access
);
1798 array_name
= isl_set_get_tuple_name(access
);
/* +20 leaves room for the decimal group number. */
1799 name
= isl_alloc_array(ctx
, char,
1800 strlen(type
) + sizeof("_private_") + strlen(array_name
) + 20);
1801 if (group
->array
->n_group
> 1)
1802 sprintf(name
, "%s_private_%s_%d", type
, array_name
, group
->nr
);
1804 sprintf(name
, "%s_private_%s", type
, array_name
);
1805 access
= isl_set_set_tuple_name(access
, name
);
/* Stash schedule and group for print_private_copy_statement. */
1808 gen
->private_sched
= shift_access(access
, group
);
1809 gen
->private_group
= group
;
1811 usched
= isl_union_map_from_map(isl_map_copy(gen
->private_sched
));
1812 print_shared_body(gen
, shared_domain
, usched
, nvar
,
1813 &print_private_copy_statement
, 1);
1814 isl_union_map_free(usched
);
1816 isl_map_free(gen
->private_sched
);
1819 /* Print code for reading into or writing from private memory
1820 * the given array reference group.
1822 * sched maps the original iteration domains to the shared memory tile loops.
/* Only groups actually mapped to private memory are handled; the
 * accesses are additionally intersected with gen->private_access. */
1824 static void print_group_private_accesses(struct cuda_gen
*gen
,
1825 struct cuda_array_ref_group
*group
,
1826 const char *type
, __isl_keep isl_set
*shared_domain
,
1827 unsigned first_shared
, int shared_len
, __isl_keep isl_union_map
*sched
)
1830 isl_union_map
*access
;
1831 isl_union_set
*uset
;
1832 isl_set
*access_set
;
1834 if (!group
->private_bound
)
/* "read" selects reads; otherwise writes (read and !read below). */
1837 read
= !strcmp(type
, "read");
1839 access
= group_access_relation(group
, read
, !read
);
1840 access
= isl_union_map_apply_domain(access
, isl_union_map_copy(sched
));
1841 access
= isl_union_map_intersect(access
,
1842 isl_union_map_copy(gen
->private_access
));
1843 uset
= isl_union_map_range(access
);
1845 if (isl_union_set_is_empty(uset
)) {
1846 isl_union_set_free(uset
);
1850 access_set
= isl_union_set_copy_set(uset
);
1851 isl_union_set_free(uset
);
1852 access_set
= isl_set_coalesce(access_set
);
/* Eliminate the "g" parameters of shared tile loops beyond the
 * current level. */
1853 access_set
= isl_set_eliminate(access_set
, isl_dim_param
,
1854 first_shared
+ shared_len
,
1855 gen
->shared_len
- shared_len
);
1857 print_private_access(gen
, shared_domain
, access_set
, type
, group
);
1860 /* Print code for reading into or writing from private memory at
1861 * the given level (-1 for innermost).
1863 * If we are not printing at the innermost level, then the dimensionality
1864 * of shared_domain may be smaller than gen->shared_len.
1865 * As the rest of the code assumes that the domain of access has
1866 * gen->shared_len dimensions, we therefore may need to embed this domain
1867 * in a higher dimensional space after intersection with shared_domain.
1869 * This code is very similar to print_shared_accesses.
1870 * The main difference is that we have to take into account gen->private_access.
1872 static void print_private_accesses(struct cuda_gen
*gen
,
1873 __isl_keep isl_set
*shared_domain
, __isl_keep isl_union_map
*access
,
1874 const char *type
, int level
)
1879 int shared_len
= isl_set_dim(shared_domain
, isl_dim_set
);
1880 unsigned first_shared
;
1881 isl_union_map
*sched
;
1883 shared_domain
= isl_set_copy(shared_domain
);
/* Project the tiled schedule onto the shared tile loops and restrict
 * it to the current position "shared_domain". */
1884 sched
= isl_union_map_copy(gen
->tiled_sched
);
1885 dim
= isl_union_map_get_dim(sched
);
/* Index of the first "g" parameter in the parameter list. */
1886 first_shared
= isl_dim_size(dim
, isl_dim_param
);
1887 proj
= projection(dim
, gen
->tiled_len
, shared_len
);
1888 sched
= isl_union_map_apply_range(sched
, isl_union_map_from_map(proj
));
1889 sched
= isl_union_map_intersect_range(sched
,
1890 isl_union_set_from_set(isl_set_copy(shared_domain
)));
/* Embed in a gen->shared_len-dimensional space when printing at an
 * outer level (see the header comment above). */
1891 if (shared_len
!= gen
->shared_len
) {
1892 dim
= isl_union_map_get_dim(sched
);
1893 proj
= projection(dim
, gen
->shared_len
, shared_len
);
1894 proj
= isl_map_reverse(proj
);
1895 shared_domain
= isl_set_apply(shared_domain
,
1896 isl_map_copy(proj
));
1897 sched
= isl_union_map_apply_range(sched
,
1898 isl_union_map_from_map(proj
));
1901 for (i
= 0; i
< gen
->n_array
; ++i
) {
1902 struct cuda_array_info
*array
= &gen
->array
[i
];
/* Only handle arrays scheduled for copying at this level. */
1904 if (gen
->array
[i
].print_shared_level
!= level
)
1907 for (j
= 0; j
< array
->n_group
; ++j
)
1908 print_group_private_accesses(gen
, array
->groups
[j
],
1909 type
, shared_domain
,
1910 first_shared
, shared_len
, sched
);
1913 isl_union_map_free(sched
);
1914 isl_set_free(shared_domain
);
1917 /* Set unroll[j] if the input dimension j is involved in
1918 * the index expression represented by bmap.
/* Callback for isl_map_foreach_basic_map; "user" is the int unroll[]
 * array.  NOTE(review): the line that actually sets unroll[j] is not
 * visible in this extraction (presumably inside the inner loop). */
1920 static int check_unroll(__isl_take isl_basic_map
*bmap
, void *user
)
1923 int n_in
= isl_basic_map_dim(bmap
, isl_dim_in
);
1924 int n_out
= isl_basic_map_dim(bmap
, isl_dim_out
);
1927 for (i
= 0; i
< n_out
; ++i
) {
/* Find the equality defining output dimension i ... */
1931 ok
= isl_basic_map_has_defining_equality(bmap
,
1932 isl_dim_out
, i
, &c
);
/* ... and record every input dimension it involves. */
1934 for (j
= 0; j
< n_in
; ++j
)
1935 if (isl_constraint_involves_dims(c
, isl_dim_in
, j
, 1))
1937 isl_constraint_free(c
);
1940 isl_basic_map_free(bmap
);
1944 /* Given an array pos mapping input dimensions to the corresponding
1945 * output dimension, construct the corresponding map.
/* NOTE(review): the "pos" and "len" parameter lines of the signature
 * are not visible in this extraction. */
1947 static __isl_give isl_map
*permutation(__isl_take isl_dim
*dim
,
1952 isl_basic_map
*bmap
;
1954 dim
= isl_dim_add(dim
, isl_dim_in
, len
);
1955 dim
= isl_dim_add(dim
, isl_dim_out
, len
);
1956 bmap
= isl_basic_map_universe(isl_dim_copy(dim
));
/* For each i, add the equality out[pos[i]] = in[i]. */
1958 for (i
= 0; i
< len
; ++i
) {
1959 c
= isl_equality_alloc(isl_dim_copy(dim
));
1960 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, -1);
1961 isl_constraint_set_coefficient_si(c
, isl_dim_out
, pos
[i
], 1);
1962 bmap
= isl_basic_map_add_constraint(bmap
, c
);
1966 return isl_map_from_basic_map(bmap
);
1969 /* Find all loops involved in any of the index expressions for any of
1970 * the private accesses, move them innermost and then mark them as
1971 * requiring unrolling by setting gen->first_unroll.
1972 * The loops involved should all be parallel because of the checks
1973 * we performed in check_private_group_access. Moving them innermost
1974 * is therefore a valid transformation.
/* NOTE(review): lossy extraction — the loop bodies that fill unroll[]
 * and perm[], the tuple name argument at original line 2007/2008 and
 * the return statement are not visible here. */
1976 static __isl_give isl_union_map
*interchange_for_unroll(struct cuda_gen
*gen
,
1977 __isl_take isl_union_map
*sched
)
1980 int unroll
[gen
->thread_tiled_len
];
1981 int perm
[gen
->thread_tiled_len
];
1984 int len
= gen
->shared_len
+ gen
->n_parallel
+ gen
->n_block
;
1986 gen
->first_unroll
= -1;
1988 for (i
= 0; i
< gen
->thread_tiled_len
; ++i
)
/* Mark in unroll[] every schedule dimension involved in a private
 * index expression of any group. */
1990 for (i
= 0; i
< gen
->n_array
; ++i
) {
1991 struct cuda_array_info
*array
= &gen
->array
[i
];
1993 for (j
= 0; j
< array
->n_group
; ++j
) {
1994 isl_union_map
*access
;
1998 if (!array
->groups
[j
]->private_bound
)
/* Both reads and writes contribute (1, 1). */
2001 access
= group_access_relation(array
->groups
[j
], 1, 1);
2002 access
= isl_union_map_apply_domain(access
,
2003 isl_union_map_copy(sched
));
2005 dim
= isl_union_map_get_dim(access
);
2006 dim
= isl_dim_add(dim
, isl_dim_out
, array
->n_index
);
2007 dim
= isl_dim_set_tuple_name(dim
, isl_dim_out
,
2009 dim
= isl_dim_add(dim
, isl_dim_in
,
2010 gen
->thread_tiled_len
);
2011 acc
= isl_union_map_extract_map(access
, dim
);
2013 isl_map_foreach_basic_map(acc
, &check_unroll
, unroll
);
2016 isl_union_map_free(access
);
/* Build the permutation: non-unrolled dimensions keep their relative
 * order, unrolled ones are moved innermost. */
2020 for (i
= 0; i
< gen
->shared_len
; ++i
)
2024 for (i
= gen
->shared_len
; i
< len
; ++i
)
2031 for (i
= len
; i
< gen
->thread_tiled_len
; ++i
)
2036 for (i
= 0; i
< gen
->thread_tiled_len
; ++i
)
2039 gen
->first_unroll
= 1 + j
;
2040 for (i
= 0; i
< len
; ++i
)
/* Apply the permutation to the schedule. */
2044 dim
= isl_union_map_get_dim(sched
);
2045 permute
= permutation(dim
, perm
, gen
->thread_tiled_len
);
2046 sched
= isl_union_map_apply_range(sched
,
2047 isl_union_map_from_map(permute
));
2052 /* This function is called for each leaf in the clast of the kernel code.
2053 * We first specialize the schedule to the site of the leaf and
2054 * print code for reading into shared memory, performing the actual
2055 * computations and writing from shared memory, with the required
2058 static void print_kernel_user(struct gpucode_info
*code
,
2059 struct clast_user_stmt
*u
)
2061 struct cuda_gen
*gen
= code
->user
;
2062 isl_set
*shared_domain
;
2064 shared_domain
= extract_entire_host_domain(u
);
/* Copy-in (shared then private), compute, then copy-out. */
2066 print_shared_accesses(gen
, shared_domain
, gen
->read
, "read", -1);
2068 print_private_accesses(gen
, shared_domain
, gen
->read
, "read", -1);
2070 print_shared_body(gen
, shared_domain
, gen
->local_sched
,
2071 gen
->thread_tiled_len
, &print_statement
,
2074 print_private_accesses(gen
, shared_domain
, gen
->write
, "write", -1);
/* Barrier between the computation and the shared-memory copy-out. */
2076 print_indent(gen
->cuda
.kernel_c
, gen
->kernel_code
.indent
);
2077 fprintf(gen
->cuda
.kernel_c
, "__syncthreads();\n");
2079 print_shared_accesses(gen
, shared_domain
, gen
->write
, "write", -1);
2081 isl_set_free(shared_domain
);
2084 /* Check if we need to perform any copying to shared memory at this level
2085 * and if so, print the copying instructions.
2086 * Any array for which we are allowed to print copying instructions at
2087 * this level, but haven't done so already, is printed.
2089 static void print_kernel_for_head(struct gpucode_info
*code
,
2090 struct clast_for
*f
)
2093 struct cuda_gen
*gen
= code
->user
;
2098 domain
= isl_set_from_cloog_domain(cloog_domain_copy(f
->domain
));
/* The loop being entered corresponds to the last set dimension. */
2099 level
= isl_set_dim(domain
, isl_dim_set
) - 1;
2101 for (i
= 0; i
< gen
->n_array
; ++i
) {
/* Skip arrays already assigned a copy level, and those whose
 * innermost shared tile loop lies deeper than this level. */
2102 if (gen
->array
[i
].print_shared_level
>= 0)
2104 if (gen
->array
[i
].last_shared
> level
)
2106 gen
->array
[i
].print_shared_level
= level
;
2111 print_shared_accesses(gen
, domain
, gen
->read
, "read", level
);
2112 print_private_accesses(gen
, domain
, gen
->read
, "read", level
);
2115 isl_set_free(domain
);
2118 /* Print instructions for copying from shared memory for each array
2119 * for which print_kernel_for_head has added copying instructions
2122 static void print_kernel_for_foot(struct gpucode_info
*code
,
2123 struct clast_for
*f
)
2126 struct cuda_gen
*gen
= code
->user
;
2131 domain
= isl_set_from_cloog_domain(cloog_domain_copy(f
->domain
));
/* Level of the loop being exited (last set dimension). */
2132 level
= isl_set_dim(domain
, isl_dim_set
) - 1;
/* Check whether any array was scheduled for copying at this level. */
2134 for (i
= 0; i
< gen
->n_array
; ++i
) {
2135 if (gen
->array
[i
].print_shared_level
!= level
)
/* Copy-out mirrors the copy-in from print_kernel_for_head. */
2142 print_private_accesses(gen
, domain
, gen
->write
, "write", level
);
2143 print_shared_accesses(gen
, domain
, gen
->write
, "write", level
);
2146 isl_set_free(domain
);
2149 /* Use CLooG to generate code for the outer gen->shared_first loops
2150 * of the local schedule "sched".
2151 * The pretty printing of this code is handled by gpu_print_host_stmt,
2152 * which calls print_kernel_user for each iteration of the shared tile loops.
2154 static void print_cloog_kernel_body(struct cuda_gen
*gen
,
2155 __isl_keep isl_set
*context
, __isl_keep isl_union_map
*sched
)
2158 CloogOptions
*options
;
2159 CloogDomain
*cloog_context
;
2160 CloogUnionDomain
*ud
;
2162 struct clast_stmt
*stmt
;
2165 sched
= isl_union_map_copy(sched
);
2166 sched
= isl_union_map_align_params(sched
, isl_set_get_dim(context
));
2168 options
= cloog_options_malloc(gen
->state
);
2169 options
->language
= LANGUAGE_C
;
2170 options
->strides
= 1;
/* Only generate loops for the outer shared tile dimensions. */
2172 options
->stop
= gen
->shared_len
;
2173 options
->f
= gen
->tiled_len
;
2174 options
->l
= gen
->tiled_len
;
2175 options
->save_domains
= 1;
2176 options
->noscalars
= 1;
2178 ud
= cloog_union_domain_from_isl_union_map(sched
);
/* Name the scattering dimensions g0, g1, ... to match the "g"
 * parametrization used elsewhere. */
2179 for (i
= 0; i
< gen
->shared_len
; ++i
) {
2180 snprintf(name
, sizeof(name
), "g%d", i
);
2181 ud
= cloog_union_domain_set_name(ud
, CLOOG_SCAT
, i
, name
);
2183 cloog_context
= cloog_domain_from_isl_set(isl_set_copy(context
));
2184 input
= cloog_input_alloc(cloog_context
, ud
);
2186 stmt
= cloog_clast_create_from_input(input
, options
);
/* Set up the pretty-printing callbacks for the generated clast. */
2188 gen
->kernel_code
.indent
= 4;
2189 gen
->kernel_code
.dst
= gen
->cuda
.kernel_c
;
2190 gen
->kernel_code
.print_user_stmt
= NULL
;
2191 gen
->kernel_code
.print_user_stmt_list
= &print_kernel_user
;
2192 gen
->kernel_code
.print_for_head
= &print_kernel_for_head
;
2193 gen
->kernel_code
.print_for_foot
= &print_kernel_for_foot
;
2194 gen
->kernel_code
.user
= gen
;
2195 gpu_print_host_stmt(&gen
->kernel_code
, stmt
);
2197 cloog_clast_free(stmt
);
2198 cloog_options_free(options
);
/* Print declarations of the block and thread identifiers used by the
 * kernel: b0, b1, ... bound to blockIdx.{x,y} and t0, t1, ... bound to
 * threadIdx.{x,y,...}, with the innermost dimension mapped to .x
 * (note the n - 1 - i reversal below). */
2201 static void print_kernel_iterators(struct cuda_gen
*gen
)
2204 const char *block_dims
[] = { "blockIdx.x", "blockIdx.y" };
2205 const char *thread_dims
[] = { "threadIdx.x", "threadIdx.y",
2208 if (gen
->n_grid
> 0) {
2209 print_indent(gen
->cuda
.kernel_c
, 4);
2210 fprintf(gen
->cuda
.kernel_c
, "int ");
2211 for (i
= 0; i
< gen
->n_grid
; ++i
) {
2213 fprintf(gen
->cuda
.kernel_c
, ", ");
2214 fprintf(gen
->cuda
.kernel_c
, "b%d = %s",
2215 i
, block_dims
[gen
->n_grid
- 1 - i
]);
2217 fprintf(gen
->cuda
.kernel_c
, ";\n");
2220 if (gen
->n_block
> 0) {
2221 print_indent(gen
->cuda
.kernel_c
, 4);
2222 fprintf(gen
->cuda
.kernel_c
, "int ");
2223 for (i
= 0; i
< gen
->n_block
; ++i
) {
2225 fprintf(gen
->cuda
.kernel_c
, ", ");
2226 fprintf(gen
->cuda
.kernel_c
, "t%d = %s",
2227 i
, thread_dims
[gen
->n_block
- 1 - i
]);
2229 fprintf(gen
->cuda
.kernel_c
, ";\n");
/* Print the declaration of the private or shared memory copy of the
 * array for the given reference group, e.g.
 *     __shared__ <type> name[size0][size1]...;
 * (the __shared__ qualifier is omitted for private copies).
 * NOTE(review): an "else" between the two bounds assignments appears
 * to have been dropped by the extraction. */
2233 static void print_group_shared_array(struct cuda_gen
*gen
,
2234 struct cuda_array_ref_group
*group
)
2237 struct cuda_array_bound
*bounds
;
/* Private memory takes precedence over shared memory. */
2239 bounds
= group
->private_bound
;
2241 bounds
= group
->shared_bound
;
2245 print_indent(gen
->cuda
.kernel_c
, 4);
2246 fprintf(gen
->cuda
.kernel_c
, "%s%s ",
2247 group
->private_bound
? "" : "__shared__ ", gen
->options
->type
);
2248 print_array_name(gen
->cuda
.kernel_c
, group
);
2249 for (j
= 0; j
< group
->array
->n_index
; ++j
) {
2250 fprintf(gen
->cuda
.kernel_c
, "[");
2251 isl_int_print(gen
->cuda
.kernel_c
, bounds
[j
].size
, 0);
2252 fprintf(gen
->cuda
.kernel_c
, "]");
2254 fprintf(gen
->cuda
.kernel_c
, ";\n");
/* Print the declarations of the private/shared memory copies for all
 * reference groups of all arrays (see print_group_shared_array). */
2257 static void print_shared_arrays(struct cuda_gen
*gen
)
2261 for (i
= 0; i
< gen
->n_array
; ++i
) {
2262 struct cuda_array_info
*array
= &gen
->array
[i
];
2264 for (j
= 0; j
< array
->n_group
; ++j
)
2265 print_group_shared_array(gen
, array
->groups
[j
]);
/* Print the body of the kernel: iterator declarations, shared/private
 * array declarations and the CLooG-generated code for "sched",
 * in the context derived from "host_domain" with the host ("h") and
 * block ("b") parameters substituted in. */
2269 static void print_kernel_body(struct cuda_gen
*gen
,
2270 __isl_keep isl_set
*host_domain
, __isl_keep isl_union_map
*sched
)
2274 context
= isl_set_copy(host_domain
);
/* Replace the outer tile dimensions by "h" parameters. */
2275 context
= parametrize(context
, 0, gen
->tile_first
, "h");
2276 context
= isl_set_project_out(context
, isl_dim_set
, 0, gen
->tile_first
);
/* Add "b" parameters bounded by the grid dimensions. */
2277 context
= add_bounded_parameters(context
,
2278 gen
->n_grid
, gen
->grid_dim
, "b");
2280 print_kernel_iterators(gen
);
2281 print_shared_arrays(gen
);
2283 fprintf(gen
->cuda
.kernel_c
, "\n");
2285 print_cloog_kernel_body(gen
, context
, sched
);
2287 isl_set_free(context
);
2290 /* Given a constraint
2292 * a(p,i) + j = g f(e)
2294 * or -a(p,i) - j = g f(e) if sign < 0,
2295 * store a(p,i) in bound->shift and g (stride) in bound->stride.
2296 * a(p,i) is assumed to be an expression in only the parameters.
/* NOTE(review): lossy extraction — the isl_int init/clear of "v" and
 * "one", the use of "sign" and the assignment of the accumulated qp to
 * bound->shift are not visible here. */
2298 static void extract_stride(__isl_keep isl_constraint
*c
,
2299 struct cuda_array_bound
*bound
, isl_int stride
, int sign
)
2306 isl_qpolynomial
*qp
;
2308 isl_int_set(bound
->stride
, stride
);
/* Build the parameter space of the constraint (drop the single
 * output dimension and all input dimensions). */
2310 dim
= isl_constraint_get_dim(c
);
2311 dim
= isl_dim_drop(dim
, isl_dim_out
, 0, 1);
2312 dim
= isl_dim_drop(dim
, isl_dim_in
, 0, isl_dim_size(dim
, isl_dim_in
));
2313 dim
= isl_dim_domain(dim
);
2315 nparam
= isl_dim_size(dim
, isl_dim_param
);
2319 isl_int_set_si(one
, 1);
/* Start from the constant term of the constraint ... */
2321 isl_constraint_get_constant(c
, &v
);
2324 qp
= isl_qpolynomial_rat_cst(isl_dim_copy(dim
), v
, one
);
/* ... and add coeff * param for each parameter with a nonzero
 * coefficient. */
2326 for (i
= 0; i
< nparam
; ++i
) {
2327 isl_qpolynomial
*t
, *p
;
2329 isl_constraint_get_coefficient(c
, isl_dim_param
, i
, &v
);
2330 if (isl_int_is_zero(v
))
2334 t
= isl_qpolynomial_rat_cst(isl_dim_copy(dim
), v
, one
);
2335 p
= isl_qpolynomial_var(isl_dim_copy(dim
), isl_dim_param
, i
);
2336 t
= isl_qpolynomial_mul(t
, p
);
2337 qp
= isl_qpolynomial_add(qp
, t
);
2347 /* Given an equality constraint of a map with a single output dimension j,
2348 * check if the constraint is of the form
2350 * a(p,i) + j = g f(e)
2352 * with a(p,i) an expression in the parameters and input dimensions
2353 * and f(e) an expression in the existentially quantified variables.
2354 * If so, and if g is larger than any such g from a previously considered
2355 * constraint, then call extract_stride. to record the stride information
2358 static int check_stride_constraint(__isl_take isl_constraint
*c
, void *user
)
2363 struct cuda_array_bound
*bound
= user
;
2366 isl_int_init(stride
);
2368 n_div
= isl_constraint_dim(c
, isl_dim_div
);
2369 isl_constraint_get_coefficient(c
, isl_dim_out
, 0, &v
);
2371 if (n_div
&& (isl_int_is_one(v
) || isl_int_is_negone(v
))) {
2372 int s
= isl_int_sgn(v
);
2373 isl_int_set_si(stride
, 0);
2374 for (i
= 0; i
< n_div
; ++i
) {
2375 isl_constraint_get_coefficient(c
, isl_dim_div
, i
, &v
);
2376 isl_int_gcd(stride
, stride
, v
);
2378 if (!isl_int_is_zero(stride
) &&
2379 isl_int_gt(stride
, bound
->stride
))
2380 extract_stride(c
, bound
, stride
, s
);
2383 isl_int_clear(stride
);
2386 isl_constraint_free(c
);
2390 /* Given contraints on an array index i, check if we can find
2391 * a shift a(p) and a stride g such that
2393 * a(p) + i = 0 mod g
2395 * If so, record the information in bound and apply the mapping
2396 * i -> (i + a(p))/g to the array index in bounds and return
2397 * the new constraints.
2398 * If not, simply return the original constraints.
2400 static __isl_give isl_basic_map
*check_stride(struct cuda_gen
*gen
,
2401 struct cuda_array_bound
*bound
, __isl_take isl_basic_map
*bounds
)
2405 isl_basic_map
*shift
;
2406 isl_qpolynomial
*qp
, *t
;
2409 isl_int_set_si(bound
->stride
, -1);
2411 aff
= isl_basic_map_affine_hull(isl_basic_map_copy(bounds
));
2413 isl_basic_map_foreach_constraint(aff
, &check_stride_constraint
, bound
);
2415 isl_basic_map_free(aff
);
2417 if (isl_int_is_neg(bound
->stride
))
2420 qp
= isl_qpolynomial_copy(bound
->shift
);
2421 qp
= isl_qpolynomial_add_dims(qp
, isl_dim_set
, 1);
2422 dim
= isl_qpolynomial_get_dim(qp
);
2423 t
= isl_qpolynomial_var(isl_dim_copy(dim
), isl_dim_set
, 0);
2424 qp
= isl_qpolynomial_add(qp
, t
);
2426 isl_int_set_si(one
, 1);
2427 t
= isl_qpolynomial_rat_cst(dim
, one
, bound
->stride
);
2429 qp
= isl_qpolynomial_mul(qp
, t
);
2430 shift
= isl_basic_map_from_qpolynomial(qp
);
2432 bound
->shift_map
= isl_basic_map_copy(shift
);
2433 bounds
= isl_basic_map_apply_range(bounds
, shift
);
2438 struct cuda_size_info
{
2439 isl_basic_set
*bset
;
2440 struct cuda_array_bound
*bound
;
2444 /* Given a constraint from the basic set describing the bounds on
2445 * an array index, check if it is a lower bound, say m i >= b(x), and,
2446 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
2447 * upper bound. If so, and if this bound is smaller than any bound
2448 * derived from earlier constraints, set the size to this bound on
2449 * the expression and the lower bound to b(x).
2451 static int compute_size_in_direction(__isl_take isl_constraint
*c
, void *user
)
2453 struct cuda_size_info
*size
= user
;
2458 nparam
= isl_basic_set_dim(size
->bset
, isl_dim_param
);
2459 n_div
= isl_constraint_dim(c
, isl_dim_div
);
2461 if (isl_constraint_involves_dims(c
, isl_dim_div
, 0, n_div
)) {
2462 isl_constraint_free(c
);
2468 isl_constraint_get_coefficient(c
, isl_dim_set
, size
->pos
, &v
);
2470 if (isl_int_is_pos(v
)) {
2472 isl_qpolynomial
*lb
;
2473 enum isl_lp_result res
;
2475 aff
= isl_constraint_get_bound(c
, isl_dim_set
, size
->pos
);
2476 aff
= isl_aff_ceil(aff
);
2478 lb
= isl_qpolynomial_from_aff(isl_aff_copy(aff
));
2480 aff
= isl_aff_neg(aff
);
2481 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_set
, size
->pos
, 1);
2483 res
= isl_basic_set_max(size
->bset
, aff
, &v
);
2486 if (res
== isl_lp_ok
) {
2487 isl_int_add_ui(v
, v
, 1);
2488 if (isl_int_is_neg(size
->bound
->size
) ||
2489 isl_int_lt(v
, size
->bound
->size
)) {
2490 isl_int_set(size
->bound
->size
, v
);
2491 isl_qpolynomial_free(size
->bound
->lb
);
2492 size
->bound
->lb
= isl_qpolynomial_copy(lb
);
2495 isl_qpolynomial_free(lb
);
2499 isl_constraint_free(c
);
2504 /* Given a basic map "bounds" that maps parameters and input dimensions
2505 * to a single output dimension, look for an expression in the parameters
2506 * and input dimensions such that the range of the output dimension shifted
2507 * by this expression is a constant.
2509 * In particular, we currently only consider lower bounds on the output
2510 * dimension as candidate expressions.
2512 static int compute_array_dim_size(struct cuda_gen
*gen
,
2513 struct cuda_array_bound
*bound
, __isl_take isl_basic_map
*bounds
)
2515 struct cuda_size_info size
;
2517 bounds
= check_stride(gen
, bound
, bounds
);
2519 isl_int_set_si(bound
->size
, -1);
2523 size
.pos
= isl_basic_map_dim(bounds
, isl_dim_in
);
2524 size
.bset
= isl_basic_map_wrap(bounds
);
2525 size
.bset
= isl_basic_set_flatten(size
.bset
);
2526 isl_basic_set_foreach_constraint(size
.bset
, &compute_size_in_direction
,
2528 isl_basic_set_free(size
.bset
);
2530 return isl_int_is_nonneg(bound
->size
) ? 0 : -1;
2533 /* Check if we can find a shared memory tile for the given array
2534 * based on the given accesses, and if so, put the results
2535 * in array->shared_bound.
2537 * We project the accesses on each index in turn and look for a parametric
2538 * offset such that the size is constant.
2540 static int can_tile_for_shared_memory(struct cuda_gen
*gen
,
2541 struct cuda_array_info
*array
, __isl_keep isl_map
*access
,
2542 struct cuda_array_bound
*bounds
)
2546 for (i
= 0; i
< array
->n_index
; ++i
) {
2548 isl_basic_map
*hull
;
2550 access_i
= isl_map_copy(access
);
2551 access_i
= isl_map_project_out(access_i
, isl_dim_out
, 0, i
);
2552 access_i
= isl_map_project_out(access_i
, isl_dim_out
,
2553 i
+ 1, array
->n_index
- (i
+ 1));
2554 access_i
= isl_map_compute_divs(access_i
);
2555 hull
= isl_map_simple_hull(access_i
);
2556 if (compute_array_dim_size(gen
, &bounds
[i
], hull
) < 0)
2563 /* Construct a map with input the shared tile loops and the loops that
2564 * will be wrapped around the threads that relates these later loops
2565 * to the thread indices and the projects them out.
2567 static __isl_give isl_map
*compute_privatization(struct cuda_gen
*gen
)
2575 dim
= isl_union_map_get_dim(gen
->shared_sched
);
2577 if (gen
->options
->wrap
)
2578 tiling
= wrap(isl_dim_copy(dim
), gen
->shared_len
+ gen
->n_block
,
2579 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
2581 tiling
= tile(isl_dim_copy(dim
), gen
->shared_len
+ gen
->n_block
,
2582 gen
->shared_len
, gen
->n_block
, gen
->block_dim
);
2586 par
= parametrization(dim
, gen
->shared_len
+ 2 * gen
->n_block
,
2587 gen
->tile_first
+ gen
->tile_len
+ gen
->n_grid
+ gen
->n_block
,
2590 priv
= isl_map_align_params(priv
, isl_set_get_dim(par
));
2591 priv
= isl_map_intersect_range(priv
, par
);
2593 dim
= isl_map_get_dim(priv
);
2594 dim
= isl_dim_drop(dim
, isl_dim_in
, 0, isl_dim_size(dim
, isl_dim_in
));
2595 dim
= isl_dim_drop(dim
, isl_dim_out
, 0, isl_dim_size(dim
, isl_dim_out
));
2596 proj
= projection(dim
, gen
->shared_len
+ 2 * gen
->n_block
,
2599 priv
= isl_map_apply_range(priv
, proj
);
2604 /* Construct a map from domain_dim to domain_dim that increments
2605 * the dimension at position "pos" and leaves all other dimensions
2608 static __isl_give isl_map
*next(__isl_take isl_dim
*domain_dim
, int pos
)
2611 int len
= isl_dim_size(domain_dim
, isl_dim_set
);
2613 isl_basic_map
*next
;
2615 dim
= isl_dim_map_from_set(domain_dim
);
2616 next
= isl_basic_map_universe(isl_dim_copy(dim
));
2618 for (i
= 0; i
< len
; ++i
) {
2621 c
= isl_equality_alloc(isl_dim_copy(dim
));
2622 isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, 1);
2623 isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
2625 isl_constraint_set_constant_si(c
, 1);
2626 next
= isl_basic_map_add_constraint(next
, c
);
2631 return isl_map_from_basic_map(next
);
2634 /* Check if the given access is coalesced.
2635 * That is, check whether incrementing the dimension that will get
2636 * wrapped over the last thread index results in incrementing
2637 * the last array index.
2639 * This function is only called for access relations without reuse.
2641 static int access_is_coalesced(struct cuda_gen
*gen
,
2642 __isl_keep isl_union_map
*access
)
2645 isl_map
*access_map
;
2646 isl_map
*next_thread_x
;
2647 isl_map
*next_element
;
2651 access
= isl_union_map_copy(access
);
2652 access
= isl_union_map_apply_domain(access
,
2653 isl_union_map_copy(gen
->tiled_sched
));
2654 access_map
= isl_union_map_copy_map(access
);
2655 isl_union_map_free(access
);
2657 dim
= isl_map_get_dim(access_map
);
2658 dim
= isl_dim_domain(dim
);
2659 next_thread_x
= next(dim
, gen
->shared_len
+ gen
->n_block
- 1);
2661 dim
= isl_map_get_dim(access_map
);
2662 dim
= isl_dim_range(dim
);
2663 next_element
= next(dim
, isl_dim_size(dim
, isl_dim_set
) - 1);
2665 map
= isl_map_apply_domain(next_thread_x
, isl_map_copy(access_map
));
2666 map
= isl_map_apply_range(map
, access_map
);
2668 coalesced
= isl_map_is_subset(map
, next_element
);
2670 isl_map_free(next_element
);
2676 /* For the given array reference group, check whether the access is private
2677 * to the thread. That is, check that any given array element
2678 * is only accessed by a single thread.
2679 * We compute an access relation that maps the shared tile loop iterators
2680 * and the shared point loop iterators that will be wrapped over the
2681 * threads to the array elements.
2682 * We actually check that those iterators that will be wrapped
2683 * partition the array space. This check is stricter than necessary
2684 * since several iterations may be mapped onto the same thread
2685 * and then they could be allowed to access the same memory elements,
2686 * but our check does not allow this situation.
2688 * We also check that the index expression only depends on parallel
2689 * loops. That way, we can move those loops innermost and unroll them.
2690 * Again, we use a test that is stricter than necessary.
2691 * We actually check whether the index expression only depends
2692 * on the iterators that are wrapped over the threads.
2693 * These are necessarily parallel, but there may be more parallel loops.
2695 * Combining the injectivity of the first test with the single-valuedness
2696 * of the second test, we simply test for bijectivity.
2698 * If it turns out we can use registers, we compute the private memory
2699 * tile size using can_tile_for_shared_memory, after introducing a dependence
2700 * on the thread indices.
2702 * Before performing any of the above computations, we first check
2703 * if there is any reuse on the reference group. If not, we simply
2704 * return. If, moreover, the access is coalesced then we also remove
2705 * the shared memory tiling since we should just use global memory instead.
2707 static void check_private_group_access(struct cuda_gen
*gen
,
2708 struct cuda_array_ref_group
*group
)
2711 isl_union_map
*access
;
2712 int n_index
= group
->array
->n_index
;
2714 access
= group_access_relation(group
, 1, 1);
2715 if (isl_union_map_is_injective(access
)) {
2716 if (group
->shared_bound
&& access_is_coalesced(gen
, access
)) {
2717 free_bound_list(group
->shared_bound
, n_index
);
2718 group
->shared_bound
= NULL
;
2720 isl_union_map_free(access
);
2723 access
= isl_union_map_apply_domain(access
,
2724 isl_union_map_copy(gen
->shared_sched
));
2726 acc
= isl_union_map_copy_map(access
);
2727 isl_union_map_free(access
);
2729 if (!isl_map_is_bijective(acc
)) {
2734 group
->private_bound
= create_bound_list(gen
->ctx
, n_index
);
2735 acc
= isl_map_align_params(acc
, isl_map_get_dim(gen
->privatization
));
2736 acc
= isl_map_apply_domain(acc
, isl_map_copy(gen
->privatization
));
2737 if (!can_tile_for_shared_memory(gen
, group
->array
, acc
,
2738 group
->private_bound
)) {
2739 free_bound_list(group
->private_bound
, n_index
);
2740 group
->private_bound
= NULL
;
2746 /* Look for the last shared tile loop that affects the offset of the
2747 * shared or private tile and store the result in array->last_shared.
2749 static void set_last_shared(struct cuda_gen
*gen
,
2750 struct cuda_array_ref_group
*group
)
2753 struct cuda_array_bound
*bounds
;
2754 unsigned first_shared
= gen
->first_shared
;
2755 int n_index
= group
->array
->n_index
;
2757 bounds
= group
->private_bound
;
2759 bounds
= group
->shared_bound
;
2763 for (j
= gen
->shared_len
- 1; j
>= 0; --j
) {
2764 for (i
= 0; i
< n_index
; ++i
) {
2765 isl_qpolynomial
*lb
, *shift
;
2768 if (isl_qpolynomial_involves_dims(lb
, isl_dim_param
,
2769 first_shared
+ j
, 1))
2772 shift
= bounds
[i
].shift
;
2775 if (isl_qpolynomial_involves_dims(shift
, isl_dim_param
,
2776 first_shared
+ j
, 1))
2782 group
->array
->last_shared
= j
;
2785 /* Compute the sizes of all private arrays for the current kernel,
2786 * as well as the offsets of the private pieces in the original arrays.
2787 * If we cannot or don't want to privatize a given array group,
2788 * we use the shared memory tile sizes computed in
2789 * compute_group_shared_bound instead.
2791 * If a given Array only has a single reference group and if we have
2792 * been able to find a privated or shared tile,
2793 * we also look for the last shared tile loop that affects the offset
2794 * (and therefore the array tile) and store the result in array->last_shared.
2796 * A privatized copy of all access relations from reference groups that
2797 * are mapped to private memory is stored in gen->privatization.
2799 static void compute_private_size(struct cuda_gen
*gen
)
2802 isl_union_map
*private;
2804 private = isl_union_map_empty(isl_union_map_get_dim(gen
->shared_sched
));
2806 for (i
= 0; i
< gen
->n_array
; ++i
) {
2807 struct cuda_array_info
*array
= &gen
->array
[i
];
2809 for (j
= 0; j
< array
->n_group
; ++j
) {
2810 check_private_group_access(gen
, array
->groups
[j
]);
2812 if (!array
->groups
[j
]->private_bound
)
2815 private = isl_union_map_union(private,
2816 group_access_relation(array
->groups
[j
], 1, 1));
2819 array
->last_shared
= gen
->shared_len
- 1;
2820 array
->print_shared_level
= -1;
2822 if (array
->n_group
!= 1)
2824 set_last_shared(gen
, array
->groups
[0]);
2827 if (isl_union_map_is_empty(private))
2828 isl_union_map_free(private);
2830 isl_union_map
*priv
;
2832 private = isl_union_map_apply_domain(private,
2833 isl_union_map_copy(gen
->shared_sched
));
2834 priv
= isl_union_map_from_map(isl_map_copy(gen
->privatization
));
2835 private = isl_union_map_apply_domain(private, priv
);
2836 gen
->private_access
= private;
2840 /* Fill up the groups array with singleton groups, i.e., one group
2841 * per reference, initializing the array, access, write and refs fields.
2842 * In particular the access field is initialized to the scheduled
2843 * access relation of the array reference.
2845 * Return the number of elements initialized, i.e., the number of
2846 * active references in the current kernel.
2848 static int populate_array_references(struct cuda_gen
*gen
,
2849 struct cuda_array_info
*array
, __isl_keep isl_union_map
*sched
,
2850 struct cuda_array_ref_group
**groups
)
2854 isl_ctx
*ctx
= isl_union_map_get_ctx(sched
);
2857 for (i
= 0; i
< array
->n_ref
; ++i
) {
2858 isl_union_map
*umap
;
2860 struct cuda_array_ref_group
*group
;
2861 struct cuda_stmt_access
*access
= array
->refs
[i
];
2863 map
= isl_map_copy(access
->access
);
2864 umap
= isl_union_map_from_map(map
);
2865 umap
= isl_union_map_apply_domain(umap
,
2866 isl_union_map_copy(sched
));
2868 map
= isl_union_map_copy_map(umap
);
2869 isl_union_map_free(umap
);
2871 if (isl_map_is_empty(map
)) {
2876 group
= isl_calloc_type(ctx
, struct cuda_array_ref_group
);
2878 group
->array
= array
;
2879 group
->access
= map
;
2880 group
->write
= access
->write
;
2881 group
->refs
= &array
->refs
[i
];
2883 groups
[n
++] = group
;
2889 static void free_array_ref_group(struct cuda_array_ref_group
*group
,
2894 free_bound_list(group
->shared_bound
, n_index
);
2895 free_bound_list(group
->private_bound
, n_index
);
2896 isl_map_free(group
->access
);
2901 /* If two groups have overlapping access relations and if one of them
2902 * involves a write, then merge the two groups into one.
2904 * We keep track of the grouping in "leader". leader[j] points to
2905 * an earlier group array element that belongs to the same group,
2906 * or the array element j itself if this element is the first in the group.
2908 * Return the number of group leaders.
2910 static int group_overlapping_writes(int n
,
2911 struct cuda_array_ref_group
**groups
, int *leader
)
2916 for (i
= 0; i
< n
; ++i
) {
2918 groups
[l
]->n_ref
= 1;
2919 for (j
= i
- 1; j
>= 0; --j
) {
2925 if (!groups
[l
]->write
&& !groups
[j
]->write
)
2928 map
= isl_map_intersect(isl_map_copy(groups
[l
]->access
),
2929 isl_map_copy(groups
[j
]->access
));
2930 empty
= isl_map_is_empty(map
);
2936 groups
[j
]->access
= isl_map_union(groups
[j
]->access
,
2938 groups
[j
]->write
= 1;
2939 groups
[l
]->access
= NULL
;
2940 groups
[j
]->n_ref
+= groups
[l
]->n_ref
;
2950 /* Compute the size of the shared array corresponding to the given array
2951 * array refrence group, based on the accesses from the current kernel,
2952 * as well as the offset of the shared piece in the original array.
2954 static void compute_group_shared_bound(struct cuda_gen
*gen
,
2955 struct cuda_array_info
*array
, struct cuda_array_ref_group
*group
)
2957 isl_ctx
*ctx
= isl_dim_get_ctx(array
->dim
);
2959 group
->shared_bound
= create_bound_list(ctx
, array
->n_index
);
2960 if (!can_tile_for_shared_memory(gen
, array
, group
->access
,
2961 group
->shared_bound
)) {
2962 free_bound_list(group
->shared_bound
, array
->n_index
);
2963 group
->shared_bound
= NULL
;
2967 /* Given an initial grouping of array references and shared memory tiles
2968 * for each group that allows for a shared memory tile, merge two groups
2969 * if both have a shared memory tile and if the merged group also has
2970 * a shared memory tile.
2972 * Return the number of group leaders after merging.
2974 static int group_common_shared_memory_tile(struct cuda_gen
*gen
,
2975 struct cuda_array_info
*array
, int n
,
2976 struct cuda_array_ref_group
**groups
, int *leader
, int n_group
)
2979 isl_ctx
*ctx
= isl_dim_get_ctx(array
->dim
);
2981 for (i
= 0; n_group
> 1 && i
< n
; ++i
) {
2985 if (!groups
[i
]->shared_bound
)
2987 for (j
= i
- 1; j
>= 0; --j
) {
2990 struct cuda_array_bound
*shared_bound
;
2994 if (!groups
[j
]->shared_bound
)
2997 map
= isl_map_intersect(isl_map_copy(groups
[l
]->access
),
2998 isl_map_copy(groups
[j
]->access
));
2999 empty
= isl_map_is_empty(map
);
3005 map
= isl_map_union(isl_map_copy(groups
[l
]->access
),
3006 isl_map_copy(groups
[j
]->access
));
3007 shared_bound
= create_bound_list(ctx
, array
->n_index
);
3008 if (!can_tile_for_shared_memory(gen
, array
, map
,
3011 free_bound_list(shared_bound
, array
->n_index
);
3015 free_bound_list(groups
[j
]->shared_bound
,
3017 groups
[j
]->shared_bound
= shared_bound
;
3018 isl_map_free(groups
[j
]->access
);
3019 groups
[j
]->access
= map
;
3020 groups
[j
]->n_ref
+= groups
[l
]->n_ref
;
3029 /* Extract an array of array reference groups from the array of references
3030 * and the grouping information in "leader".
3032 * Store the results in array->n_group and array->groups.
3034 static void extract_array_groups(isl_ctx
*ctx
, struct cuda_array_info
*array
,
3035 int n
, struct cuda_array_ref_group
**groups
, int *leader
, int n_group
)
3039 for (i
= 2; i
< n
; ++i
)
3040 leader
[i
] = leader
[leader
[i
]];
3042 array
->n_group
= n_group
;
3043 array
->groups
= isl_alloc_array(ctx
, struct cuda_array_ref_group
*,
3045 assert(array
->groups
);
3048 for (i
= 0; i
< n
; ++i
) {
3050 struct cuda_stmt_access
**refs
;
3052 if (leader
[i
] != i
) {
3053 groups
[i
]->refs
= NULL
;
3054 free_array_ref_group(groups
[i
], array
->n_index
);
3058 refs
= isl_alloc_array(ctx
, struct cuda_stmt_access
*,
3062 for (k
= i
; k
< n
; ++k
)
3063 if (leader
[k
] == i
) {
3064 refs
[l
++] = *groups
[k
]->refs
;
3065 (*groups
[k
]->refs
)->group
= j
;
3068 groups
[i
]->refs
= refs
;
3070 array
->groups
[j
++] = groups
[i
];
3074 /* Group array references that should be considered together when
3075 * deciding whether to access them from private, shared or global memory.
3077 * In particular, if two array references overlap and if one of them
3078 * is a write, then the two references are grouped together.
3079 * Furthermore, if two groups admit a shared memory tile and if the
3080 * combination of the two also admits a shared memory tile, we merge
3083 * During the construction the group->refs field points to a single
3084 * array reference inside the array of array references, while
3085 * group->n_ref contains the number of element in leader that
3086 * (directly or indirectly) point to this group, provided the group
3089 static void group_array_references(struct cuda_gen
*gen
,
3090 struct cuda_array_info
*array
, __isl_keep isl_union_map
*sched
)
3094 isl_ctx
*ctx
= isl_union_map_get_ctx(sched
);
3095 struct cuda_array_ref_group
**groups
;
3098 groups
= isl_calloc_array(ctx
, struct cuda_array_ref_group
*,
3102 n
= populate_array_references(gen
, array
, sched
, groups
);
3104 leader
= isl_alloc_array(ctx
, int, n
);
3107 n_group
= group_overlapping_writes(n
, groups
, leader
);
3109 for (i
= 0; i
< n
; ++i
)
3111 compute_group_shared_bound(gen
, array
, groups
[i
]);
3113 n_group
= group_common_shared_memory_tile(gen
, array
, n
, groups
,
3116 extract_array_groups(ctx
, array
, n
, groups
, leader
, n_group
);
3122 /* Take tiled_sched, project it onto the shared tile loops and
3123 * the loops that will be wrapped over the threads,
3124 * parametrize the shared tile loops and store the result in gen->shared_sched.
3125 * The position of the first of these parameters is stored in gen->first_shared.
3126 * Also compute a projection that projects out the loops that will be
3127 * wrapped over the threads and store this projection in gen->shared_proj.
3129 static void compute_shared_sched(struct cuda_gen
*gen
)
3134 isl_union_map
*sched
;
3136 sched
= isl_union_map_copy(gen
->tiled_sched
);
3138 dim
= isl_union_map_get_dim(sched
);
3139 gen
->first_shared
= isl_dim_size(dim
, isl_dim_param
);
3140 proj
= projection(dim
, gen
->tiled_len
, gen
->shared_len
+ gen
->n_block
);
3141 sched
= isl_union_map_apply_range(sched
, isl_union_map_from_map(proj
));
3143 dim
= isl_union_map_get_dim(sched
);
3144 par
= parametrization(dim
, gen
->shared_len
+ gen
->n_block
,
3145 0, gen
->shared_len
, "g");
3146 sched
= isl_union_map_intersect_range(sched
,
3147 isl_union_set_from_set(par
));
3149 dim
= isl_union_map_get_dim(sched
);
3150 proj
= projection(dim
, gen
->shared_len
+ gen
->n_block
, gen
->shared_len
);
3152 gen
->shared_sched
= sched
;
3153 gen
->shared_proj
= isl_union_map_from_map(proj
);
3156 /* Group references of all arrays in the program.
3158 static void group_references(struct cuda_gen
*gen
)
3161 isl_union_map
*sched
;
3163 sched
= isl_union_map_apply_range(isl_union_map_copy(gen
->shared_sched
),
3164 isl_union_map_copy(gen
->shared_proj
));
3166 for (i
= 0; i
< gen
->n_array
; ++i
)
3167 group_array_references(gen
, &gen
->array
[i
], sched
);
3169 isl_union_map_free(sched
);
3172 /* Free all array information that is local to the current kernel.
3174 static void free_local_array_info(struct cuda_gen
*gen
)
3178 for (i
= 0; i
< gen
->n_array
; ++i
) {
3179 struct cuda_array_info
*array
= &gen
->array
[i
];
3181 for (j
= 0; j
< array
->n_group
; ++j
)
3182 free_array_ref_group(array
->groups
[j
], array
->n_index
);
3183 free(array
->groups
);
3185 if (array
->n_group
== 0)
3187 for (j
= 0; j
< gen
->array
[i
].n_index
; ++j
) {
3188 isl_pw_qpolynomial_fold_free(gen
->array
[i
].local_bound
[j
]);
3189 gen
->array
[i
].local_bound
[j
] = NULL
;
3194 static void print_iterator_list(FILE *out
, int len
, const char *prefix
,
3200 for (i
= 0; i
< len
; ++i
) {
3204 fprintf(out
, "(%s%d)", prefix
, i
);
3206 fprintf(out
, "%s%d", prefix
, i
);
3211 /* Print an access to the element in the global memory copy of the
3212 * given array that corresponds to element [a0][a1]... of the original array.
3213 * The copy in global memory has been linearized, so we need to take
3214 * the array size into account.
3216 static void print_global_index(isl_ctx
*ctx
, FILE *out
,
3217 struct cuda_array_info
*array
)
3222 fprintf(out
, "%s[", array
->name
);
3223 for (i
= 0; i
+ 1 < array
->n_index
; ++i
)
3225 for (i
= 0; i
< array
->n_index
; ++i
) {
3227 prn
= isl_printer_to_file(ctx
, out
);
3228 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
3229 prn
= isl_printer_print_str(prn
, ") * (");
3230 prn
= isl_printer_print_pw_qpolynomial_fold(prn
,
3231 array
->local_bound
[i
]);
3232 prn
= isl_printer_print_str(prn
, ") + ");
3233 isl_printer_free(prn
);
3235 fprintf(out
, "a%d", i
);
3240 /* Print an access to the element in the shared memory copy of the
3241 * given array that corresponds to element [a0][a1]... of the original array.
3242 * Since the array in shared memory is just a shifted copy of part
3243 * of the original array, we simply need to subtract the lower bound,
3244 * which was computed in can_tile_for_shared_memory.
3245 * If any of the indices is strided, then we first add
3246 * shared_bound[i].shift and divide by shared_bound[i].stride.
3248 static void print_local_index(FILE *out
, struct cuda_array_ref_group
*group
)
3253 struct cuda_array_bound
*bounds
= group
->shared_bound
;
3255 ctx
= isl_dim_get_ctx(group
->array
->dim
);
3256 print_array_name(out
, group
);
3257 for (i
= 0; i
< group
->array
->n_index
; ++i
) {
3258 fprintf(out
, "[(a%d", i
);
3259 if (bounds
[i
].shift
) {
3260 fprintf(out
, " + (");
3261 prn
= isl_printer_to_file(ctx
, out
);
3262 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
3263 prn
= isl_printer_print_qpolynomial(prn
,
3265 prn
= isl_printer_print_str(prn
, "))/");
3266 prn
= isl_printer_print_isl_int(prn
,
3268 isl_printer_free(prn
);
3271 fprintf(out
, " - (");
3272 prn
= isl_printer_to_file(ctx
, out
);
3273 prn
= isl_printer_set_output_format(prn
, ISL_FORMAT_C
);
3274 prn
= isl_printer_print_qpolynomial(prn
, bounds
[i
].lb
);
3275 isl_printer_free(prn
);
3280 /* Print '#define's for copying data from global memory to shared
3281 * memory and back for the given array.
3283 static void print_array_copy_defines(struct cuda_gen
*gen
,
3284 struct cuda_array_ref_group
*group
)
3287 const char *type
[] = { "read", "write" };
3288 struct cuda_array_info
*array
= group
->array
;
3289 int n_index
= array
->n_index
;
3291 for (i
= 0; i
< 2; ++i
) {
3292 fprintf(gen
->cuda
.kernel_c
, "#define %s_", type
[i
]);
3293 print_array_name(gen
->cuda
.kernel_c
, group
);
3294 print_iterator_list(gen
->cuda
.kernel_c
, n_index
, "a", 0);
3295 fprintf(gen
->cuda
.kernel_c
, " %s_", type
[i
]);
3296 print_array_name(gen
->cuda
.kernel_c
, group
);
3297 fprintf(gen
->cuda
.kernel_c
, "_");
3298 print_iterator_list(gen
->cuda
.kernel_c
, n_index
, "a", 1);
3299 fprintf(gen
->cuda
.kernel_c
, "\n");
3301 fprintf(gen
->cuda
.kernel_c
, "#define %s_", type
[i
]);
3302 print_array_name(gen
->cuda
.kernel_c
, group
);
3303 fprintf(gen
->cuda
.kernel_c
, "_");
3304 print_iterator_list(gen
->cuda
.kernel_c
, n_index
, "a", 0);
3306 fprintf(gen
->cuda
.kernel_c
, " ");
3307 print_global_index(gen
->ctx
, gen
->cuda
.kernel_c
, array
);
3308 fprintf(gen
->cuda
.kernel_c
, " = ");
3309 print_local_index(gen
->cuda
.kernel_c
, group
);
3311 fprintf(gen
->cuda
.kernel_c
, " ");
3312 print_local_index(gen
->cuda
.kernel_c
, group
);
3313 fprintf(gen
->cuda
.kernel_c
, " = ");
3314 print_global_index(gen
->ctx
, gen
->cuda
.kernel_c
, array
);
3316 fprintf(gen
->cuda
.kernel_c
, "\n");
3320 static void print_copy_defines(struct cuda_gen
*gen
)
3324 for (i
= 0; i
< gen
->n_array
; ++i
) {
3325 struct cuda_array_info
*array
= &gen
->array
[i
];
3327 for (j
= 0; j
< array
->n_group
; ++j
) {
3328 if (array
->groups
[j
]->private_bound
)
3330 if (!array
->groups
[j
]->shared_bound
)
3332 print_array_copy_defines(gen
, array
->groups
[j
]);
3337 /* The sizes of the arrays on the host that have been computed by
3338 * extract_array_info may depend on the parameters. Use the extra
3339 * constraints on the parameters that are valid at "host_domain"
3340 * to simplify these expressions.
3342 static void localize_bounds(struct cuda_gen
*gen
,
3343 __isl_keep isl_set
*host_domain
)
3349 context
= isl_set_copy(host_domain
);
3350 nvar
= isl_set_dim(host_domain
, isl_dim_set
);
3351 context
= isl_set_project_out(host_domain
, isl_dim_set
, 0, nvar
);
3353 for (i
= 0; i
< gen
->n_array
; ++i
) {
3354 struct cuda_array_info
*array
= &gen
->array
[i
];
3356 if (array
->n_group
== 0)
3359 for (j
= 0; j
< array
->n_index
; ++j
) {
3360 isl_pw_qpolynomial_fold
*pwf
;
3362 pwf
= isl_pw_qpolynomial_fold_copy(array
->bound
[j
]);
3363 pwf
= isl_pw_qpolynomial_fold_gist(pwf
,
3364 isl_set_copy(context
));
3365 array
->local_bound
[j
] = pwf
;
3368 isl_set_free(context
);
3371 /* Set gen->tile_len and gen->n_parallel to those of the first statement
3372 * in the statement list u.
3373 * Because of the way the schedule is constructed, the other statements
3374 * in the list, if any, should have the same values for these properties.
3376 static void set_tile_len(struct cuda_gen
*gen
, struct clast_user_stmt
*u
)
3379 struct cuda_stmt
*stmt
;
3381 nr
= atoi(u
->statement
->name
+ 2);
3382 stmt
= &gen
->stmts
[nr
];
3384 gen
->tile_len
= stmt
->tile_len
;
3385 gen
->n_parallel
= stmt
->n_parallel
;
3388 /* This function is called for each leaf in the clast of the host code.
3389 * We first specialize the schedule to the site of the leaf, compute
3390 * the size of shared memory and then print the body of host code
3391 * and the associated kernel (through a call to print_kernel_body).
3393 static void print_host_user(struct gpucode_info
*code
,
3394 struct clast_user_stmt
*u
)
3396 struct cuda_gen
*gen
= code
->user
;
3399 isl_set
*host_domain
;
3400 isl_union_map
*access
;
3401 isl_union_map
*local_sched
;
3402 isl_union_set
*arrays
;
3404 set_tile_len(gen
, u
);
3407 host_domain
= extract_entire_host_domain(u
);
3409 local_sched
= isl_union_map_intersect_range(
3410 isl_union_map_copy(gen
->sched
),
3411 isl_union_set_from_set(extend(isl_set_copy(host_domain
),
3412 gen
->untiled_len
)));
3413 access
= isl_union_map_union(isl_union_map_copy(gen
->read
),
3414 isl_union_map_copy(gen
->write
));
3415 access
= isl_union_map_apply_domain(access
,
3416 isl_union_map_copy(local_sched
));
3417 arrays
= isl_union_map_range(access
);
3419 print_indent(code
->dst
, code
->indent
);
3420 fprintf(code
->dst
, "dim3 k%d_dimBlock(", gen
->kernel_id
);
3421 print_reverse_list(code
->dst
, gen
->n_block
, gen
->block_dim
);
3422 fprintf(code
->dst
, ");\n");
3424 print_indent(code
->dst
, code
->indent
);
3425 fprintf(code
->dst
, "dim3 k%d_dimGrid(", gen
->kernel_id
);
3426 print_reverse_list(code
->dst
, gen
->n_grid
, gen
->grid_dim
);
3427 fprintf(code
->dst
, ");\n");
3429 gen
->tiled_sched
= tile_schedule(gen
, local_sched
);
3430 gen
->tiled_sched
= parametrize_tiled_schedule(gen
, gen
->tiled_sched
);
3431 gen
->tiled_sched
= scale_tile_loops(gen
, gen
->tiled_sched
);
3433 gen
->local_sched
= isl_union_map_copy(gen
->tiled_sched
);
3435 dim
= isl_union_map_get_dim(gen
->local_sched
);
3436 par
= parametrization(dim
, gen
->tiled_len
, 0, gen
->shared_len
, "g");
3437 gen
->local_sched
= isl_union_map_intersect_range(gen
->local_sched
,
3438 isl_union_set_from_set(par
));
3440 gen
->local_sched
= thread_tile_schedule(gen
, gen
->local_sched
);
3441 gen
->local_sched
= scale_thread_tile_loops(gen
, gen
->local_sched
);
3443 gen
->private_access
= NULL
;
3444 compute_shared_sched(gen
);
3445 gen
->privatization
= compute_privatization(gen
);
3446 group_references(gen
);
3447 compute_private_size(gen
);
3448 localize_bounds(gen
, host_domain
);
3450 gen
->local_sched
= interchange_for_unroll(gen
, gen
->local_sched
);
3452 print_copy_defines(gen
);
3453 print_kernel_launch(gen
, arrays
);
3455 fprintf(gen
->cuda
.kernel_c
, "{\n");
3457 print_kernel_body(gen
, host_domain
, gen
->tiled_sched
);
3459 fprintf(gen
->cuda
.kernel_c
, "}\n");
3461 free_local_array_info(gen
);
3462 isl_map_free(gen
->privatization
);
3463 isl_union_map_free(gen
->private_access
);
3464 isl_union_map_free(gen
->local_sched
);
3465 isl_union_map_free(gen
->tiled_sched
);
3466 isl_union_map_free(gen
->shared_sched
);
3467 isl_union_map_free(gen
->shared_proj
);
3468 isl_union_set_free(arrays
);
3469 isl_set_free(host_domain
);
3471 free(gen
->tile_size
);
3475 /* Use CLooG to generate code for the outer gen->tile_first loops
3476 * of the global schedule in gen->sched.
3477 * The pretty printing of this code is handled by gpu_print_host_stmt,
3478 * which calls print_host_user for each kernel invocation location.
3480 static void print_cloog_host_code(struct cuda_gen
*gen
)
3484 isl_union_map
*sched
;
3485 CloogOptions
*options
;
3486 CloogDomain
*cloog_context
;
3487 CloogUnionDomain
*ud
;
3489 struct clast_stmt
*stmt
;
3492 options
= cloog_options_malloc(gen
->state
);
3493 options
->language
= LANGUAGE_C
;
3495 options
->strides
= 1;
3496 options
->stop
= gen
->tile_first
;
3497 options
->f
= gen
->untiled_len
;
3498 options
->l
= gen
->untiled_len
;
3499 options
->save_domains
= 1;
3500 options
->noscalars
= 1;
3502 sched
= isl_union_map_copy(gen
->sched
);
3503 ud
= cloog_union_domain_from_isl_union_map(sched
);
3504 for (i
= 0; i
< options
->stop
; ++i
) {
3505 snprintf(name
, sizeof(name
), "h%d", i
);
3506 ud
= cloog_union_domain_set_name(ud
, CLOOG_SCAT
, i
, name
);
3508 context
= isl_set_copy(gen
->context
);
3509 cloog_context
= cloog_domain_from_isl_set(context
);
3510 input
= cloog_input_alloc(cloog_context
, ud
);
3512 stmt
= cloog_clast_create_from_input(input
, options
);
3514 gen
->code
.indent
= 0;
3515 gen
->code
.dst
= gen
->cuda
.host_c
;
3516 gen
->code
.print_user_stmt
= NULL
;
3517 gen
->code
.print_user_stmt_list
= &print_host_user
;
3518 gen
->code
.print_for_head
= NULL
;
3519 gen
->code
.print_for_foot
= NULL
;
3520 gen
->code
.user
= gen
;
3521 gpu_print_host_stmt(&gen
->code
, stmt
);
3523 cloog_clast_free(stmt
);
3524 cloog_options_free(options
);
3527 void print_host_code(struct cuda_gen
*gen
)
3529 fprintf(gen
->cuda
.host_c
, "{\n");
3530 print_cloog_macros(gen
->cuda
.host_c
);
3531 print_cloog_macros(gen
->cuda
.kernel_c
);
3533 declare_device_arrays(gen
);
3535 allocate_device_arrays(gen
);
3536 copy_arrays_to_device(gen
);
3539 print_cloog_host_code(gen
);
3541 copy_arrays_from_device(gen
);
3542 free_device_arrays(gen
);
3544 fprintf(gen
->cuda
.host_c
, "}\n");
3547 __isl_give isl_set
*add_context_from_str(__isl_take isl_set
*set
,
3556 ctx
= isl_set_get_ctx(set
);
3557 context
= isl_set_read_from_str(ctx
, str
, -1);
3558 context
= isl_set_align_params(context
, isl_set_get_dim(set
));
3559 set
= isl_set_intersect(set
, context
);
3564 /* Convert scop->context to an isl_set.
3566 static __isl_give isl_set
*extract_context(isl_ctx
*ctx
, scoplib_scop_p scop
)
3570 dim
= isl_dim_set_alloc(ctx
, scop
->nb_parameters
, 0);
3571 dim
= set_dim_names(dim
, isl_dim_param
, scop
->parameters
);
3572 return scoplib_matrix_to_isl_set(scop
->context
, dim
);
3575 /* Return an array of cuda_stmt representing the statements in "scop".
3577 static struct cuda_stmt
*extract_stmts(isl_ctx
*ctx
, scoplib_scop_p scop
,
3578 __isl_keep isl_set
*context
)
3581 struct cuda_stmt
*stmts
;
3582 scoplib_statement_p stmt
= scop
->statement
;
3584 n
= scoplib_statement_number(scop
->statement
);
3585 stmts
= isl_calloc_array(ctx
, struct cuda_stmt
, n
);
3588 for (stmt
= scop
->statement
, n
= 0; stmt
; stmt
= stmt
->next
, n
++) {
3591 struct cuda_stmt
*s
= &stmts
[n
];
3593 snprintf(name
, sizeof(name
), "S_%d", n
);
3595 dim
= isl_dim_set_alloc(ctx
, scop
->nb_parameters
,
3596 stmt
->nb_iterators
);
3597 dim
= set_dim_names(dim
, isl_dim_param
, scop
->parameters
);
3598 dim
= set_dim_names(dim
, isl_dim_set
, stmt
->iterators
);
3599 dim
= isl_dim_set_tuple_name(dim
, isl_dim_set
, name
);
3600 dim
= set_dim_names(dim
, isl_dim_set
, stmt
->iterators
);
3601 s
->domain
= scoplib_matrix_list_to_isl_set(stmt
->domain
,
3603 s
->domain
= isl_set_intersect(s
->domain
, isl_set_copy(context
));
3604 s
->text
= strdup(stmt
->body
);
3605 stmt_extract_accesses(s
);
3611 /* Extract all the read and write accesses from "scop" and store
3612 * them in gen->read and gen->write.
3614 static void extract_accesses(struct cuda_gen
*gen
, scoplib_scop_p scop
)
3617 int n
= scoplib_statement_number(scop
->statement
);
3619 scoplib_statement_p stmt
;
3621 dim
= isl_set_get_dim(gen
->context
);
3622 gen
->write
= isl_union_map_empty(isl_dim_copy(dim
));
3623 gen
->read
= isl_union_map_empty(dim
);
3625 for (i
= 0, stmt
= scop
->statement
; i
< n
; ++i
, stmt
= stmt
->next
) {
3626 isl_union_map
*read_i
;
3627 isl_union_map
*write_i
;
3629 read_i
= scoplib_access_to_isl_union_map(stmt
->read
,
3630 isl_set_copy(gen
->stmts
[i
].domain
),
3632 write_i
= scoplib_access_to_isl_union_map(stmt
->write
,
3633 isl_set_copy(gen
->stmts
[i
].domain
),
3636 gen
->read
= isl_union_map_union(gen
->read
, read_i
);
3637 gen
->write
= isl_union_map_union(gen
->write
, write_i
);
3641 /* Extract and return the original schedule of the program from "scop".
3643 static isl_union_map
*extract_original_schedule(struct cuda_gen
*gen
,
3644 scoplib_scop_p scop
)
3647 int n
= scoplib_statement_number(scop
->statement
);
3649 isl_union_map
*sched
;
3650 scoplib_statement_p stmt
;
3652 dim
= isl_set_get_dim(gen
->context
);
3653 sched
= isl_union_map_empty(dim
);
3655 for (i
= 0, stmt
= scop
->statement
; i
< n
; ++i
, stmt
= stmt
->next
) {
3658 dim
= isl_set_get_dim(gen
->stmts
[i
].domain
);
3659 dim
= isl_dim_from_domain(dim
);
3660 dim
= isl_dim_add(dim
, isl_dim_out
, 2 * stmt
->nb_iterators
+ 1);
3661 sched_i
= scoplib_schedule_to_isl_map(stmt
->schedule
, dim
);
3663 sched
= isl_union_map_union(sched
,
3664 isl_union_map_from_map(sched_i
));
3670 /* Return the union of all iteration domains of the gen->stmts[i].
3672 static __isl_give isl_union_set
*extract_domain(struct cuda_gen
*gen
)
3675 isl_union_set
*domain
;
3677 domain
= isl_union_set_empty(isl_set_get_dim(gen
->context
));
3678 for (i
= 0; i
< gen
->n_stmts
; ++i
) {
3681 domain_i
= isl_set_copy(gen
->stmts
[i
].domain
);
3682 domain
= isl_union_set_union(domain
,
3683 isl_union_set_from_set(domain_i
));
3689 /* Information about the outermost tilable bands in the forest of bands.
3691 * tile_len and n_parallel are only sets on band_info structures
3692 * that correspond to outermost bands. For other bands (in particular,
3693 * ancestors of the outermost bands), n_parallal is set to 0.
3695 * prefix is the (padded) schedule leading up to the outermost tilable bands.
3697 * tile_first is the number of schedule dimensions in prefix.
3699 * suffix is the schedule of the outermost tilable bands and their descendants.
3702 struct cuda_gen
*gen
;
3706 isl_union_map
*prefix
;
3707 isl_union_map
*suffix
;
3710 /* Set tile_len and n_parallel of the statement to that of
3711 * their outermost band, recorded in the band_info.
3713 static int set_stmt_tile_len(__isl_take isl_map
*map
, void *user
)
3715 struct band_info
*info
= user
;
3717 struct cuda_stmt
*stmt
;
3719 nr
= atoi(isl_map_get_tuple_name(map
, isl_dim_in
) + 2);
3720 stmt
= &info
->gen
->stmts
[nr
];
3722 stmt
->tile_len
= info
->tile_len
;
3723 stmt
->n_parallel
= info
->n_parallel
;
3730 static void list_select_outer_band(struct cuda_gen
*gen
,
3731 __isl_take isl_band_list
*list
, int pos
, struct band_info
*list_info
);
3733 /* Check if this band has any parallel loops. If so, take it as
3734 * the outermost tilable band. If not, continue looking for the
3735 * outermost tilable band in the children of the current band.
3737 static void band_select_outer_band(struct cuda_gen
*gen
,
3738 __isl_take isl_band
*band
, int pos
, struct band_info
*info
)
3740 int n
= isl_band_n_member(band
);
3743 for (n_parallel
= 0; n_parallel
< n
; ++n_parallel
)
3744 if (!isl_band_member_is_zero_distance(band
, n_parallel
))
3747 info
->n_parallel
= n_parallel
;
3750 info
->tile_first
= pos
;
3752 info
->prefix
= isl_band_get_prefix_schedule(band
);
3753 info
->suffix
= isl_union_map_flat_range_product(
3754 isl_band_get_partial_schedule(band
),
3755 isl_band_get_suffix_schedule(band
));
3756 isl_union_map_foreach_map(info
->prefix
,
3757 &set_stmt_tile_len
, info
);
3759 isl_band_list
*children
;
3760 assert(isl_band_has_children(band
));
3761 children
= isl_band_get_children(band
);
3762 list_select_outer_band(gen
, children
, pos
+ n
, info
);
3765 isl_band_free(band
);
3768 /* Comparison function that returns a non-zero value for band_infos
3769 * with different tile_len fields or different n_parallel fields.
3771 static int cmp_band(const void *p1
, const void *p2
)
3773 const struct band_info
*info1
= p1
;
3774 const struct band_info
*info2
= p2
;
3776 if (info1
->tile_len
!= info2
->tile_len
)
3777 return info1
->tile_len
- info2
->tile_len
;
3779 return info1
->n_parallel
- info2
->n_parallel
;
3782 /* Extend "umap" with coordinates with fixed value "val"
3783 * to a total length of "dst_len", assuming the original dimension is "src_len".
3785 static __isl_give isl_union_map
*extend_range(__isl_take isl_union_map
*umap
,
3786 int src_len
, int dst_len
, int val
)
3792 dim
= isl_union_map_get_dim(umap
);
3793 map
= isl_map_reverse(projection(dim
, dst_len
, src_len
));
3794 for (i
= src_len
; i
< dst_len
; ++i
)
3795 map
= isl_map_fix_si(map
, isl_dim_out
, i
, val
);
3797 umap
= isl_union_map_apply_range(umap
, isl_union_map_from_map(map
));
3802 /* Group bands with the same values for tile_len and n_parallel.
3803 * The prefix schedule is then extended with a fixed coordinate that
3804 * is different for each such group.
3805 * Note that the actual values for this coordinate are not important.
3806 * The bands have already been effectively separated at a higher level
3807 * or they are independent and may be executed in parallel.
3808 * The list of band_info has been sorted before this functions is called.
3810 static void separate_bands(struct band_info
*info
, int n
)
3815 for (i
= 0; i
< n
; ++i
) {
3816 int l
= info
[i
].tile_first
;
3819 (info
[i
].tile_len
!= info
[i
- 1].tile_len
||
3820 info
[i
].n_parallel
!= info
[i
- 1].n_parallel
))
3823 info
[i
].prefix
= extend_range(info
[i
].prefix
,
3825 info
[i
].tile_first
= l
+ 1;
3829 /* Select the outermost bands in the elements of the list, align
3830 * their prefix schedules, separate bands with different values
3831 * for tile_len and/or n_parallel and then combine the resulting
3832 * prefix and suffix schedules into a single pair of prefix and
3833 * suffix schedules for the entire list.
3835 static void list_select_outer_band(struct cuda_gen
*gen
,
3836 __isl_take isl_band_list
*list
, int pos
, struct band_info
*list_info
)
3840 int n
= isl_band_list_n_band(list
);
3841 isl_ctx
*ctx
= isl_band_list_get_ctx(list
);
3842 struct band_info
*info
;
3844 isl_union_map
*prefix
;
3845 isl_union_map
*suffix
;
3848 info
= isl_calloc_array(ctx
, struct band_info
, n
);
3852 for (i
= 0; i
< n
; ++i
) {
3853 band
= isl_band_list_get_band(list
, i
);
3854 band_select_outer_band(gen
, band
, pos
, &info
[i
]);
3855 if (info
[i
].tile_first
> max_tile_first
)
3856 max_tile_first
= info
[i
].tile_first
;
3859 for (i
= 0; i
< n
; ++i
) {
3860 if (info
[i
].tile_first
== max_tile_first
)
3862 info
[i
].prefix
= extend_range(info
[i
].prefix
,
3863 info
[i
].tile_first
, max_tile_first
, 0);
3866 qsort(info
, n
, sizeof(struct band_info
), &cmp_band
);
3868 for (i
= 0; i
< n
- 1; ++i
)
3869 if (info
[i
].tile_len
!= info
[i
+ 1].tile_len
||
3870 info
[i
].n_parallel
!= info
[i
+ 1].n_parallel
)
3874 separate_bands(info
, n
);
3876 prefix
= info
[0].prefix
;
3877 suffix
= info
[0].suffix
;
3879 for (i
= 1; i
< n
; ++i
) {
3880 prefix
= isl_union_map_union(prefix
, info
[i
].prefix
);
3881 suffix
= isl_union_map_union(suffix
, info
[i
].suffix
);
3884 list_info
->tile_first
= info
[0].tile_first
;
3885 list_info
->tile_len
= -1;
3886 list_info
->prefix
= prefix
;
3887 list_info
->suffix
= suffix
;
3889 isl_band_list_free(list
);
3893 /* Set max_out to the maximal number of output dimensions over
3896 static int update_max_out(__isl_take isl_map
*map
, void *user
)
3898 int *max_out
= user
;
3899 int n_out
= isl_map_dim(map
, isl_dim_out
);
3901 if (n_out
> *max_out
)
3908 struct align_range_data
{
3913 /* Extend the dimension of the range of the given map to data->max_out and
3914 * then add the result to data->res.
3916 static int map_align_range(__isl_take isl_map
*map
, void *user
)
3918 struct align_range_data
*data
= user
;
3922 int n_out
= isl_map_dim(map
, isl_dim_out
);
3924 dim
= isl_union_map_get_dim(data
->res
);
3925 proj
= isl_map_reverse(projection(dim
, data
->max_out
, n_out
));
3926 for (i
= n_out
; i
< data
->max_out
; ++i
)
3927 proj
= isl_map_fix_si(proj
, isl_dim_out
, i
, 0);
3929 map
= isl_map_apply_range(map
, proj
);
3931 data
->res
= isl_union_map_add_map(data
->res
, map
);
3936 /* Extend the ranges of the maps in the union map such they all have
3937 * the same dimension.
3939 static __isl_give isl_union_map
*align_range(__isl_take isl_union_map
*umap
)
3941 struct align_range_data data
;
3944 isl_union_map_foreach_map(umap
, &update_max_out
, &data
.max_out
);
3946 data
.res
= isl_union_map_empty(isl_union_map_get_dim(umap
));
3947 isl_union_map_foreach_map(umap
, &map_align_range
, &data
);
3949 isl_union_map_free(umap
);
3953 /* Select the outermost tilable band that (by construction)
3954 * has at least one parallel loop.
3955 * The starting position of the aligned band is stored in the pair
3957 * The sizes and number of parallel loops may be different in different
3958 * parts of the band forest and are therefore stored in the cuda_stmts.
3960 * Return the complete schedule, with the tilable bands aligned
3961 * at gen->tile_first and padded with zero, if needed.
3963 static __isl_give isl_union_map
*select_outer_tilable_band(struct cuda_gen
*gen
,
3964 __isl_keep isl_schedule
*schedule
)
3966 isl_band_list
*list
;
3967 struct band_info info
;
3969 gen
->n_parallel
= 0;
3972 list
= isl_schedule_get_band_forest(schedule
);
3974 list_select_outer_band(gen
, list
, 0, &info
);
3976 gen
->tile_first
= info
.tile_first
;
3977 info
.suffix
= align_range(info
.suffix
);
3979 return isl_union_map_flat_range_product(info
.prefix
, info
.suffix
);
3982 /* Set gen->untiled_len to the number of scheduling dimensions
3983 * for the schedule of the first domain.
3984 * We assume here that this number is the same for all domains.
3986 static int set_untiled_len(__isl_take isl_map
*map
, void *user
)
3988 unsigned *untiled_len
= user
;
3990 *untiled_len
= isl_map_dim(map
, isl_dim_out
);
3996 /* Compute an appropriate schedule based on the accesses in
3997 * gen->read and gen->write.
3999 * We first compute dependences and then use those to compute
4000 * a schedule that has a parallel loop in each tilable band.
4001 * Finally, we select the outermost tilable band.
4003 static void compute_schedule(struct cuda_gen
*gen
,
4004 __isl_take isl_union_map
*sched
)
4006 isl_ctx
*ctx
= isl_union_map_get_ctx(sched
);
4007 isl_union_set
*domain
;
4008 isl_union_map
*empty
;
4009 isl_union_map
*dep_raw
, *dep2
, *dep3
, *dep
;
4010 isl_union_map
*uninitialized
;
4011 isl_schedule
*schedule
;
4012 struct isl_options
*options
;
4014 empty
= isl_union_map_empty(isl_union_map_get_dim(sched
));
4016 isl_union_map_compute_flow(isl_union_map_copy(gen
->read
),
4017 isl_union_map_copy(gen
->write
), empty
,
4018 isl_union_map_copy(sched
),
4019 &dep_raw
, NULL
, &uninitialized
, NULL
);
4020 isl_union_map_compute_flow(isl_union_map_copy(gen
->write
),
4021 isl_union_map_copy(gen
->write
),
4022 isl_union_map_copy(gen
->read
),
4023 isl_union_map_copy(sched
),
4024 &dep2
, &dep3
, NULL
, NULL
);
4025 isl_union_map_free(sched
);
4027 gen
->copy_in
= isl_union_map_range(uninitialized
);
4029 dep
= isl_union_map_union(dep2
, dep3
);
4030 dep
= isl_union_map_union(dep
, dep_raw
);
4031 dep
= isl_union_map_coalesce(dep
);
4033 domain
= extract_domain(gen
);
4034 options
= isl_ctx_peek_options(ctx
, isl_options_arg
);
4035 options
->schedule_outer_zero_distance
= 1;
4036 schedule
= isl_union_set_compute_schedule(isl_union_set_copy(domain
),
4037 isl_union_map_copy(dep
), dep
);
4039 sched
= select_outer_tilable_band(gen
, schedule
);
4041 isl_union_map_foreach_map(sched
, &set_untiled_len
, &gen
->untiled_len
);
4042 sched
= isl_union_map_intersect_domain(sched
, domain
);
4045 isl_schedule_free(schedule
);
4048 /* Replace the scop in the "input" file by equivalent code
4049 * that uses the GPU. "scop" is assumed to correspond to this scop.
4051 * We first compute a schedule that respects the dependences
4052 * of the original program and select the outermost band
4053 * of tilable dimensions that has at least one parallel loop.
4054 * We then have three blocks of dimensions
4058 * The tilable band "B" is first tiled according to "tile.sizes", resulting
4063 * For each iteration of the T loop and for each array, we compute
4064 * the array elements accessed by that iteration, construct a rectangular
4065 * box around it and shift it to the origin. The result is used
4066 * as shared memory for the array.
4068 * We then split off at most 2 parallel loops from the T loops and
4069 * at most 3 parallel loops from the P loops
4073 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
4074 * according to "grid.sizes"/"block.sizes".
4076 * H T1T T1P T2 P1T P1P P2 G
4078 * Finally, the T1P and P1P iterators are equated to the block and
4079 * thread dimensions respectively and so are effectively removed.
4080 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
4081 * are run on the GPU.
4083 * Code is generated in three stages. We first generate code for the
4084 * host (the H loops), with iterators h%d. Then, for each leaf node
4085 * of the resulting AST, we generate code for the shared loops (up to
4086 * and including T2), with iterators g%d and after equating the H loops
4087 * to h%d parameters and the T1P loops to the block dimensions.
4088 * Finally, we generate code for the remaining loops in a similar fashion.
4090 * The function frees "scop" and "ctx".
4092 int cuda_scop(isl_ctx
*ctx
, scoplib_scop_p scop
, struct ppcg_options
*options
,
4095 isl_union_map
*sched
;
4096 struct cuda_gen gen
;
4099 gen
.context
= extract_context(gen
.ctx
, scop
);
4100 gen
.context
= add_context_from_str(gen
.context
, options
->ctx
);
4101 gen
.n_stmts
= scoplib_statement_number(scop
->statement
);
4102 gen
.stmts
= extract_stmts(gen
.ctx
, scop
, gen
.context
);
4103 extract_accesses(&gen
, scop
);
4104 gen
.options
= options
;
4105 gen
.state
= cloog_isl_state_malloc(gen
.ctx
);
4107 cuda_open_files(&gen
.cuda
, input
);
4109 collect_array_info(&gen
);
4111 sched
= extract_original_schedule(&gen
, scop
);
4112 compute_schedule(&gen
, sched
);
4114 print_host_code(&gen
);
4116 cloog_state_free(gen
.state
);
4117 clear_cuda_gen(&gen
);
4118 isl_ctx_free(gen
.ctx
);
4119 scoplib_scop_free(scop
);
4121 cuda_close_files(&gen
.cuda
);