/* ppcg: gpu_array_tile.h
 * (extracted from gitweb; commit "gpu_array_ref_group: clarify documentation",
 * blob 0b437dc22dcf85eb5d3c3d3cbe082c99d85895d5)
 */
#ifndef GPU_ARRAY_TILE_H
#define GPU_ARRAY_TILE_H

#include <isl/aff_type.h>
#include <isl/map_type.h>
#include <isl/val.h>
8 /* The fields stride and shift only contain valid information
9 * if shift != NULL.
10 * If so, they express that current index is such that if you add shift,
11 * then the result is always a multiple of stride.
12 * Let D represent the initial group->depth dimensions of the computed schedule.
13 * The spaces of "lb" and "shift" are of the form
15 * D -> [b]
17 struct gpu_array_bound {
18 isl_val *size;
19 isl_aff *lb;
21 isl_val *stride;
22 isl_aff *shift;
25 /* A tile of an array.
27 * requires_unroll is set if the schedule dimensions that are mapped
28 * to threads need to be unrolled for this (private) tile to be used.
30 * n is the dimension of the array.
31 * bound is an array of size "n" representing the lower bound
32 * and size for each index.
34 * tiling maps a tile in the global array to the corresponding
35 * shared/private memory tile and is of the form
37 * { [D[i] -> A[a]] -> T[(a + shift(i))/stride - lb(i)] }
39 * where D represents the initial group->depth dimensions
40 * of the computed schedule.
42 struct gpu_array_tile {
43 isl_ctx *ctx;
44 int requires_unroll;
45 int n;
46 struct gpu_array_bound *bound;
47 isl_multi_aff *tiling;
50 struct gpu_array_tile *gpu_array_tile_create(isl_ctx *ctx, int n_index);
51 struct gpu_array_tile *gpu_array_tile_free(struct gpu_array_tile *tile);
53 __isl_give isl_val *gpu_array_tile_size(struct gpu_array_tile *tile);
55 #endif