/*
 * Copyright 2013 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <string.h>
#include <stdio.h>

#include <isl/id.h>
#include <isl/space.h>
#include <isl/union_set.h>
#include <isl/schedule_node.h>

#include "gpu_tree.h"
/* The functions in this file are used to navigate part of a schedule tree
 * that is mapped to blocks.  Initially, this part consists of a linear
 * branch segment with a mark node with name "kernel" on the outer end
 * and a mark node with name "thread" on the inner end.
 * During the mapping to blocks, branching may be introduced, but only
 * one of the elements in each sequence contains the "thread" mark.
 * The filter of this element (and only this filter) contains
 * domain elements identified by the "core" argument of the functions
 * that move down this tree.
 *
 * Synchronization statements have a name that starts with "sync" and
 * a user pointer pointing to the kernel that contains the synchronization.
 * The functions inserting or detecting synchronizations take a ppcg_kernel
 * argument to be able to create or identify such statements.
 * They may also use two fields in this structure, the "core" field
 * to move around in the tree and the "n_sync" field to make sure that
 * each synchronization has a different name (within the kernel).
 */
36 /* Is "node" a mark node with an identifier called "name"?
38 static int is_marked(__isl_keep isl_schedule_node
*node
, const char *name
)
46 if (isl_schedule_node_get_type(node
) != isl_schedule_node_mark
)
49 mark
= isl_schedule_node_mark_get_id(node
);
53 has_name
= !strcmp(isl_id_get_name(mark
), name
);
59 /* Is "node" a mark node with an identifier called "kernel"?
61 int gpu_tree_node_is_kernel(__isl_keep isl_schedule_node
*node
)
63 return is_marked(node
, "kernel");
66 /* Is "node" a mark node with an identifier called "thread"?
68 static int node_is_thread(__isl_keep isl_schedule_node
*node
)
70 return is_marked(node
, "thread");
73 /* Assuming "node" is a filter node, does it correspond to the branch
74 * that contains the "thread" mark, i.e., does it contain any elements
77 static int node_is_core(__isl_keep isl_schedule_node
*node
,
78 __isl_keep isl_union_set
*core
)
81 isl_union_set
*filter
;
83 filter
= isl_schedule_node_filter_get_filter(node
);
84 disjoint
= isl_union_set_is_disjoint(filter
, core
);
85 isl_union_set_free(filter
);
92 /* Move to the only child of "node" that has the "thread" mark as descendant,
93 * where the branch containing this mark is identified by the domain elements
96 * If "node" is not a sequence, then it only has one child and we move
97 * to that single child.
98 * Otherwise, we check each of the filters in the children, pick
99 * the one that corresponds to "core" and return a pointer to the child
100 * of the filter node.
102 static __isl_give isl_schedule_node
*core_child(
103 __isl_take isl_schedule_node
*node
, __isl_keep isl_union_set
*core
)
107 if (isl_schedule_node_get_type(node
) != isl_schedule_node_sequence
)
108 return isl_schedule_node_child(node
, 0);
110 n
= isl_schedule_node_n_children(node
);
111 for (i
= 0; i
< n
; ++i
) {
114 node
= isl_schedule_node_child(node
, i
);
115 is_core
= node_is_core(node
, core
);
118 return isl_schedule_node_free(node
);
120 return isl_schedule_node_child(node
, 0);
122 node
= isl_schedule_node_parent(node
);
125 isl_die(isl_schedule_node_get_ctx(node
), isl_error_internal
,
126 "core child not found", return isl_schedule_node_free(node
));
129 /* Move down the branch between "kernel" and "thread" until
130 * the "thread" mark is reached, where the branch containing the "thread"
131 * mark is identified by the domain elements in "core".
133 __isl_give isl_schedule_node
*gpu_tree_move_down_to_thread(
134 __isl_take isl_schedule_node
*node
, __isl_keep isl_union_set
*core
)
138 while ((is_thread
= node_is_thread(node
)) == 0)
139 node
= core_child(node
, core
);
141 node
= isl_schedule_node_free(node
);
146 /* Move up the tree underneath the "thread" mark until
147 * the "thread" mark is reached.
149 __isl_give isl_schedule_node
*gpu_tree_move_up_to_thread(
150 __isl_take isl_schedule_node
*node
)
154 while ((is_thread
= node_is_thread(node
)) == 0)
155 node
= isl_schedule_node_parent(node
);
157 node
= isl_schedule_node_free(node
);
162 /* Move up the tree underneath the "kernel" mark until
163 * the "kernel" mark is reached.
165 __isl_give isl_schedule_node
*gpu_tree_move_up_to_kernel(
166 __isl_take isl_schedule_node
*node
)
170 while ((is_kernel
= gpu_tree_node_is_kernel(node
)) == 0)
171 node
= isl_schedule_node_parent(node
);
173 node
= isl_schedule_node_free(node
);
178 /* Move down from the "kernel" mark (or at least a node with schedule
179 * depth smaller than or equal to "depth") to a band node at schedule
180 * depth "depth". The "thread" mark is assumed to have a schedule
181 * depth greater than or equal to "depth". The branch containing the
182 * "thread" mark is identified by the domain elements in "core".
184 * If the desired schedule depth is in the middle of band node,
185 * then the band node is split into two pieces, the second piece
186 * at the desired schedule depth.
188 __isl_give isl_schedule_node
*gpu_tree_move_down_to_depth(
189 __isl_take isl_schedule_node
*node
, int depth
,
190 __isl_keep isl_union_set
*core
)
194 while (node
&& isl_schedule_node_get_schedule_depth(node
) < depth
) {
195 if (isl_schedule_node_get_type(node
) ==
196 isl_schedule_node_band
) {
197 int node_depth
, node_dim
;
198 node_depth
= isl_schedule_node_get_schedule_depth(node
);
199 node_dim
= isl_schedule_node_band_n_member(node
);
200 if (node_depth
+ node_dim
> depth
)
201 node
= isl_schedule_node_band_split(node
,
204 node
= core_child(node
, core
);
206 while ((is_thread
= node_is_thread(node
)) == 0 &&
207 isl_schedule_node_get_type(node
) != isl_schedule_node_band
)
208 node
= core_child(node
, core
);
210 node
= isl_schedule_node_free(node
);
215 /* Create a union set containing a single set with a tuple identifier
216 * called "syncX" and user pointer equal to "kernel".
218 static __isl_give isl_union_set
*create_sync_domain(struct ppcg_kernel
*kernel
)
224 space
= isl_space_set_alloc(kernel
->ctx
, 0, 0);
225 snprintf(name
, sizeof(name
), "sync%d", kernel
->n_sync
++);
226 id
= isl_id_alloc(kernel
->ctx
, name
, kernel
);
227 space
= isl_space_set_tuple_id(space
, isl_dim_set
, id
);
228 return isl_union_set_from_set(isl_set_universe(space
));
231 /* Is "id" the identifier of a synchronization statement inside "kernel"?
232 * That is, does its name start with "sync" and does it point to "kernel"?
234 int gpu_tree_id_is_sync(__isl_keep isl_id
*id
, struct ppcg_kernel
*kernel
)
238 name
= isl_id_get_name(id
);
241 else if (strncmp(name
, "sync", 4))
243 return isl_id_get_user(id
) == kernel
;
246 /* Does "domain" consist of a single set with a tuple identifier
247 * corresponding to a synchronization for "kernel"?
249 static int domain_is_sync(__isl_keep isl_union_set
*domain
,
250 struct ppcg_kernel
*kernel
)
256 if (isl_union_set_n_set(domain
) != 1)
258 set
= isl_set_from_union_set(isl_union_set_copy(domain
));
259 id
= isl_set_get_tuple_id(set
);
260 is_sync
= gpu_tree_id_is_sync(id
, kernel
);
267 /* Does "node" point to a filter selecting a synchronization statement
270 static int node_is_sync_filter(__isl_keep isl_schedule_node
*node
,
271 struct ppcg_kernel
*kernel
)
274 enum isl_schedule_node_type type
;
275 isl_union_set
*domain
;
279 type
= isl_schedule_node_get_type(node
);
280 if (type
!= isl_schedule_node_filter
)
282 domain
= isl_schedule_node_filter_get_filter(node
);
283 is_sync
= domain_is_sync(domain
, kernel
);
284 isl_union_set_free(domain
);
289 /* Is "node" part of a sequence with a previous synchronization statement
291 * That is, is the parent of "node" a filter such that there is
292 * a previous filter that picks out exactly such a synchronization statement?
294 static int has_preceding_sync(__isl_keep isl_schedule_node
*node
,
295 struct ppcg_kernel
*kernel
)
299 node
= isl_schedule_node_copy(node
);
300 node
= isl_schedule_node_parent(node
);
301 while (!found
&& isl_schedule_node_has_previous_sibling(node
)) {
302 node
= isl_schedule_node_previous_sibling(node
);
305 found
= node_is_sync_filter(node
, kernel
);
309 isl_schedule_node_free(node
);
314 /* Is "node" part of a sequence with a subsequent synchronization statement
316 * That is, is the parent of "node" a filter such that there is
317 * a subsequent filter that picks out exactly such a synchronization statement?
319 static int has_following_sync(__isl_keep isl_schedule_node
*node
,
320 struct ppcg_kernel
*kernel
)
324 node
= isl_schedule_node_copy(node
);
325 node
= isl_schedule_node_parent(node
);
326 while (!found
&& isl_schedule_node_has_next_sibling(node
)) {
327 node
= isl_schedule_node_next_sibling(node
);
330 found
= node_is_sync_filter(node
, kernel
);
334 isl_schedule_node_free(node
);
339 /* Does the subtree rooted at "node" (which is a band node) contain
340 * any synchronization statement for "kernel" that precedes
341 * the core computation of "kernel" (identified by the elements
344 static int has_sync_before_core(__isl_keep isl_schedule_node
*node
,
345 struct ppcg_kernel
*kernel
)
350 node
= isl_schedule_node_copy(node
);
351 while ((is_thread
= node_is_thread(node
)) == 0) {
352 node
= core_child(node
, kernel
->core
);
353 has_sync
= has_preceding_sync(node
, kernel
);
354 if (has_sync
< 0 || has_sync
)
357 if (is_thread
< 0 || !node
)
359 isl_schedule_node_free(node
);
364 /* Does the subtree rooted at "node" (which is a band node) contain
365 * any synchronization statement for "kernel" that follows
366 * the core computation of "kernel" (identified by the elements
369 static int has_sync_after_core(__isl_keep isl_schedule_node
*node
,
370 struct ppcg_kernel
*kernel
)
375 node
= isl_schedule_node_copy(node
);
376 while ((is_thread
= node_is_thread(node
)) == 0) {
377 node
= core_child(node
, kernel
->core
);
378 has_sync
= has_following_sync(node
, kernel
);
379 if (has_sync
< 0 || has_sync
)
382 if (is_thread
< 0 || !node
)
384 isl_schedule_node_free(node
);
389 /* Insert (or extend) an extension on top of "node" that puts
390 * a synchronization node for "kernel" before "node".
391 * Return a pointer to the original node in the updated schedule tree.
393 static __isl_give isl_schedule_node
*insert_sync_before(
394 __isl_take isl_schedule_node
*node
, struct ppcg_kernel
*kernel
)
396 isl_union_set
*domain
;
397 isl_schedule_node
*graft
;
402 domain
= create_sync_domain(kernel
);
403 graft
= isl_schedule_node_from_domain(domain
);
404 node
= isl_schedule_node_graft_before(node
, graft
);
409 /* Insert (or extend) an extension on top of "node" that puts
410 * a synchronization node for "kernel" afater "node".
411 * Return a pointer to the original node in the updated schedule tree.
413 static __isl_give isl_schedule_node
*insert_sync_after(
414 __isl_take isl_schedule_node
*node
, struct ppcg_kernel
*kernel
)
416 isl_union_set
*domain
;
417 isl_schedule_node
*graft
;
422 domain
= create_sync_domain(kernel
);
423 graft
= isl_schedule_node_from_domain(domain
);
424 node
= isl_schedule_node_graft_after(node
, graft
);
429 /* Insert an extension on top of "node" that puts a synchronization node
430 * for "kernel" before "node" unless there already is
431 * such a synchronization node.
433 __isl_give isl_schedule_node
*gpu_tree_ensure_preceding_sync(
434 __isl_take isl_schedule_node
*node
, struct ppcg_kernel
*kernel
)
438 has_sync
= has_preceding_sync(node
, kernel
);
440 return isl_schedule_node_free(node
);
443 return insert_sync_before(node
, kernel
);
446 /* Insert an extension on top of "node" that puts a synchronization node
447 * for "kernel" after "node" unless there already is
448 * such a synchronization node.
450 __isl_give isl_schedule_node
*gpu_tree_ensure_following_sync(
451 __isl_take isl_schedule_node
*node
, struct ppcg_kernel
*kernel
)
455 has_sync
= has_following_sync(node
, kernel
);
457 return isl_schedule_node_free(node
);
460 return insert_sync_after(node
, kernel
);
463 /* Insert an extension on top of "node" that puts a synchronization node
464 * for "kernel" after "node" unless there already is such a sync node or
465 * "node" itself already * contains a synchronization node following
466 * the core computation of "kernel".
468 __isl_give isl_schedule_node
*gpu_tree_ensure_sync_after_core(
469 __isl_take isl_schedule_node
*node
, struct ppcg_kernel
*kernel
)
473 has_sync
= has_sync_after_core(node
, kernel
);
475 return isl_schedule_node_free(node
);
478 has_sync
= has_following_sync(node
, kernel
);
480 return isl_schedule_node_free(node
);
483 return insert_sync_after(node
, kernel
);
486 /* Move left in the sequence on top of "node" to a synchronization node
488 * If "node" itself contains a synchronization node preceding
489 * the core computation of "kernel", then return "node" itself.
490 * Otherwise, if "node" does not have a preceding synchronization node,
491 * then create one first.
493 __isl_give isl_schedule_node
*gpu_tree_move_left_to_sync(
494 __isl_take isl_schedule_node
*node
, struct ppcg_kernel
*kernel
)
499 has_sync
= has_sync_before_core(node
, kernel
);
501 return isl_schedule_node_free(node
);
504 node
= gpu_tree_ensure_preceding_sync(node
, kernel
);
505 node
= isl_schedule_node_parent(node
);
506 while ((is_sync
= node_is_sync_filter(node
, kernel
)) == 0)
507 node
= isl_schedule_node_previous_sibling(node
);
509 node
= isl_schedule_node_free(node
);
510 node
= isl_schedule_node_child(node
, 0);
515 /* Move right in the sequence on top of "node" to a synchronization node
517 * If "node" itself contains a synchronization node following
518 * the core computation of "kernel", then return "node" itself.
519 * Otherwise, if "node" does not have a following synchronization node,
520 * then create one first.
522 __isl_give isl_schedule_node
*gpu_tree_move_right_to_sync(
523 __isl_take isl_schedule_node
*node
, struct ppcg_kernel
*kernel
)
528 has_sync
= has_sync_after_core(node
, kernel
);
530 return isl_schedule_node_free(node
);
533 node
= gpu_tree_ensure_following_sync(node
, kernel
);
534 node
= isl_schedule_node_parent(node
);
535 while ((is_sync
= node_is_sync_filter(node
, kernel
)) == 0)
536 node
= isl_schedule_node_next_sibling(node
);
538 node
= isl_schedule_node_free(node
);
539 node
= isl_schedule_node_child(node
, 0);