2 * Copyright 2012 INRIA Paris-Rocquencourt
3 * Copyright 2012 Ecole Normale Superieure
5 * Use of this software is governed by the MIT license
7 * Written by Tobias Grosser, INRIA Paris-Rocquencourt,
8 * Domaine de Voluceau, Rocquencourt, B.P. 105,
9 * 78153 Le Chesnay Cedex France
10 * and Sven Verdoolaege,
11 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
22 #include <isl/ast_build.h>
23 #include <isl/schedule.h>
24 #include <isl/schedule_node.h>
28 #include "ppcg_options.h"
34 /* Representation of a statement inside a generated AST.
36 * "stmt" refers to the original statement.
37 * "ref2expr" maps the reference identifier of each access in
38 * the statement to an AST expression that should be printed
39 * at the place of the access.
42 struct pet_stmt
*stmt
;
44 isl_id_to_ast_expr
*ref2expr
;
47 static void ppcg_stmt_free(void *user
)
49 struct ppcg_stmt
*stmt
= user
;
54 isl_id_to_ast_expr_free(stmt
->ref2expr
);
59 /* Derive the output file name from the input file name.
60 * 'input' is the entire path of the input file. The output
61 * is the file name plus the additional extension.
63 * We will basically replace everything after the last point
64 * with '.ppcg.c'. This means file.c becomes file.ppcg.c
66 static FILE *get_output_file(const char *input
, const char *output
)
70 const char ppcg_marker
[] = ".ppcg";
74 len
= ppcg_extract_base_name(name
, input
);
76 strcpy(name
+ len
, ppcg_marker
);
77 ext
= strrchr(input
, '.');
78 strcpy(name
+ len
+ sizeof(ppcg_marker
) - 1, ext
? ext
: ".c");
83 file
= fopen(output
, "w");
85 fprintf(stderr
, "Unable to open '%s' for writing\n", output
);
/* Data used to annotate for nodes in the ast.
 */
struct ast_node_userinfo {
	/* The for node is an openmp parallel for node. */
	int is_openmp;
};
/* Information used while building the ast.
 */
struct ast_build_userinfo {
	/* The current ppcg scop. */
	struct ppcg_scop *scop;

	/* Are we currently in a parallel for loop? */
	int in_parallel_for;
};
109 /* Check if the current scheduling dimension is parallel.
111 * We check for parallelism by verifying that the loop does not carry any
113 * If the live_range_reordering option is set, then this currently
114 * includes the order dependences. In principle, non-zero order dependences
115 * could be allowed, but this would require privatization and/or expansion.
117 * Parallelism test: if the distance is zero in all outer dimensions, then it
118 * has to be zero in the current dimension as well.
119 * Implementation: first, translate dependences into time space, then force
120 * outer dimensions to be equal. If the distance is zero in the current
121 * dimension, then the loop is parallel.
122 * The distance is zero in the current dimension if it is a subset of a map
123 * with equal values for the current dimension.
125 static int ast_schedule_dim_is_parallel(__isl_keep isl_ast_build
*build
,
126 struct ast_build_userinfo
*build_info
)
128 struct ppcg_scop
*scop
= build_info
->scop
;
129 isl_union_map
*schedule
, *deps
;
130 isl_map
*schedule_deps
, *test
;
131 isl_space
*schedule_space
;
132 unsigned i
, dimension
, is_parallel
;
134 schedule
= isl_ast_build_get_schedule(build
);
135 schedule_space
= isl_ast_build_get_schedule_space(build
);
137 dimension
= isl_space_dim(schedule_space
, isl_dim_out
) - 1;
139 deps
= isl_union_map_copy(scop
->dep_flow
);
140 deps
= isl_union_map_union(deps
, isl_union_map_copy(scop
->dep_false
));
141 if (scop
->options
->live_range_reordering
) {
142 isl_union_map
*order
= isl_union_map_copy(scop
->dep_order
);
143 deps
= isl_union_map_union(deps
, order
);
145 deps
= isl_union_map_apply_range(deps
, isl_union_map_copy(schedule
));
146 deps
= isl_union_map_apply_domain(deps
, schedule
);
148 if (isl_union_map_is_empty(deps
)) {
149 isl_union_map_free(deps
);
150 isl_space_free(schedule_space
);
154 schedule_deps
= isl_map_from_union_map(deps
);
156 for (i
= 0; i
< dimension
; i
++)
157 schedule_deps
= isl_map_equate(schedule_deps
, isl_dim_out
, i
,
160 test
= isl_map_universe(isl_map_get_space(schedule_deps
));
161 test
= isl_map_equate(test
, isl_dim_out
, dimension
, isl_dim_in
,
163 is_parallel
= isl_map_is_subset(schedule_deps
, test
);
165 isl_space_free(schedule_space
);
167 isl_map_free(schedule_deps
);
172 /* Mark a for node openmp parallel, if it is the outermost parallel for node.
174 static void mark_openmp_parallel(__isl_keep isl_ast_build
*build
,
175 struct ast_build_userinfo
*build_info
,
176 struct ast_node_userinfo
*node_info
)
178 if (build_info
->in_parallel_for
)
181 if (ast_schedule_dim_is_parallel(build
, build_info
)) {
182 build_info
->in_parallel_for
= 1;
183 node_info
->is_openmp
= 1;
187 /* Allocate an ast_node_info structure and initialize it with default values.
189 static struct ast_node_userinfo
*allocate_ast_node_userinfo()
191 struct ast_node_userinfo
*node_info
;
192 node_info
= (struct ast_node_userinfo
*)
193 malloc(sizeof(struct ast_node_userinfo
));
194 node_info
->is_openmp
= 0;
/* Free an ast_node_info structure.
 * "ptr" is the ast_node_userinfo; the signature is imposed by
 * isl_id_set_free_user.
 */
static void free_ast_node_userinfo(void *ptr)
{
	struct ast_node_userinfo *info;

	info = (struct ast_node_userinfo *) ptr;
	free(info);
}
207 /* This method is executed before the construction of a for node. It creates
208 * an isl_id that is used to annotate the subsequently generated ast for nodes.
210 * In this function we also run the following analyses:
212 * - Detection of openmp parallel loops
214 static __isl_give isl_id
*ast_build_before_for(
215 __isl_keep isl_ast_build
*build
, void *user
)
218 struct ast_build_userinfo
*build_info
;
219 struct ast_node_userinfo
*node_info
;
221 build_info
= (struct ast_build_userinfo
*) user
;
222 node_info
= allocate_ast_node_userinfo();
223 id
= isl_id_alloc(isl_ast_build_get_ctx(build
), "", node_info
);
224 id
= isl_id_set_free_user(id
, free_ast_node_userinfo
);
226 mark_openmp_parallel(build
, build_info
, node_info
);
231 /* This method is executed after the construction of a for node.
233 * It performs the following actions:
235 * - Reset the 'in_parallel_for' flag, as soon as we leave a for node,
236 * that is marked as openmp parallel.
239 static __isl_give isl_ast_node
*ast_build_after_for(
240 __isl_take isl_ast_node
*node
, __isl_keep isl_ast_build
*build
,
244 struct ast_build_userinfo
*build_info
;
245 struct ast_node_userinfo
*info
;
247 id
= isl_ast_node_get_annotation(node
);
248 info
= isl_id_get_user(id
);
250 if (info
&& info
->is_openmp
) {
251 build_info
= (struct ast_build_userinfo
*) user
;
252 build_info
->in_parallel_for
= 0;
260 /* Find the element in scop->stmts that has the given "id".
262 static struct pet_stmt
*find_stmt(struct ppcg_scop
*scop
, __isl_keep isl_id
*id
)
266 for (i
= 0; i
< scop
->pet
->n_stmt
; ++i
) {
267 struct pet_stmt
*stmt
= scop
->pet
->stmts
[i
];
270 id_i
= isl_set_get_tuple_id(stmt
->domain
);
277 isl_die(isl_id_get_ctx(id
), isl_error_internal
,
278 "statement not found", return NULL
);
281 /* Print a user statement in the generated AST.
282 * The ppcg_stmt has been attached to the node in at_each_domain.
284 static __isl_give isl_printer
*print_user(__isl_take isl_printer
*p
,
285 __isl_take isl_ast_print_options
*print_options
,
286 __isl_keep isl_ast_node
*node
, void *user
)
288 struct ppcg_stmt
*stmt
;
291 id
= isl_ast_node_get_annotation(node
);
292 stmt
= isl_id_get_user(id
);
295 p
= pet_stmt_print_body(stmt
->stmt
, p
, stmt
->ref2expr
);
297 isl_ast_print_options_free(print_options
);
303 /* Print a for loop node as an openmp parallel loop.
305 * To print an openmp parallel loop we print a normal for loop, but add
306 * "#pragma openmp parallel for" in front.
308 * Variables that are declared within the body of this for loop are
309 * automatically openmp 'private'. Iterators declared outside of the
310 * for loop are automatically openmp 'shared'. As ppcg declares all iterators
311 * at the position where they are assigned, there is no need to explicitly mark
312 * variables. Their automatically assigned type is already correct.
314 * This function only generates valid OpenMP code, if the ast was generated
315 * with the 'atomic-bounds' option enabled.
318 static __isl_give isl_printer
*print_for_with_openmp(
319 __isl_keep isl_ast_node
*node
, __isl_take isl_printer
*p
,
320 __isl_take isl_ast_print_options
*print_options
)
322 p
= isl_printer_start_line(p
);
323 p
= isl_printer_print_str(p
, "#pragma omp parallel for");
324 p
= isl_printer_end_line(p
);
326 p
= isl_ast_node_for_print(node
, p
, print_options
);
333 * Depending on how the node is annotated, we either print a normal
334 * for node or an openmp parallel for node.
336 static __isl_give isl_printer
*print_for(__isl_take isl_printer
*p
,
337 __isl_take isl_ast_print_options
*print_options
,
338 __isl_keep isl_ast_node
*node
, void *user
)
344 id
= isl_ast_node_get_annotation(node
);
347 struct ast_node_userinfo
*info
;
349 info
= (struct ast_node_userinfo
*) isl_id_get_user(id
);
350 if (info
&& info
->is_openmp
)
355 p
= print_for_with_openmp(node
, p
, print_options
);
357 p
= isl_ast_node_for_print(node
, p
, print_options
);
364 /* Index transformation callback for pet_stmt_build_ast_exprs.
366 * "index" expresses the array indices in terms of statement iterators
367 * "iterator_map" expresses the statement iterators in terms of
368 * AST loop iterators.
370 * The result expresses the array indices in terms of
371 * AST loop iterators.
373 static __isl_give isl_multi_pw_aff
*pullback_index(
374 __isl_take isl_multi_pw_aff
*index
, __isl_keep isl_id
*id
, void *user
)
376 isl_pw_multi_aff
*iterator_map
= user
;
378 iterator_map
= isl_pw_multi_aff_copy(iterator_map
);
379 return isl_multi_pw_aff_pullback_pw_multi_aff(index
, iterator_map
);
382 /* Transform the accesses in the statement associated to the domain
383 * called by "node" to refer to the AST loop iterators, construct
384 * corresponding AST expressions using "build",
385 * collect them in a ppcg_stmt and annotate the node with the ppcg_stmt.
387 static __isl_give isl_ast_node
*at_each_domain(__isl_take isl_ast_node
*node
,
388 __isl_keep isl_ast_build
*build
, void *user
)
390 struct ppcg_scop
*scop
= user
;
391 isl_ast_expr
*expr
, *arg
;
395 isl_pw_multi_aff
*iterator_map
;
396 struct ppcg_stmt
*stmt
;
398 ctx
= isl_ast_node_get_ctx(node
);
399 stmt
= isl_calloc_type(ctx
, struct ppcg_stmt
);
403 expr
= isl_ast_node_user_get_expr(node
);
404 arg
= isl_ast_expr_get_op_arg(expr
, 0);
405 isl_ast_expr_free(expr
);
406 id
= isl_ast_expr_get_id(arg
);
407 isl_ast_expr_free(arg
);
408 stmt
->stmt
= find_stmt(scop
, id
);
413 map
= isl_map_from_union_map(isl_ast_build_get_schedule(build
));
414 map
= isl_map_reverse(map
);
415 iterator_map
= isl_pw_multi_aff_from_map(map
);
416 stmt
->ref2expr
= pet_stmt_build_ast_exprs(stmt
->stmt
, build
,
417 &pullback_index
, iterator_map
, NULL
, NULL
);
418 isl_pw_multi_aff_free(iterator_map
);
420 id
= isl_id_alloc(isl_ast_node_get_ctx(node
), NULL
, stmt
);
421 id
= isl_id_set_free_user(id
, &ppcg_stmt_free
);
422 return isl_ast_node_set_annotation(node
, id
);
424 ppcg_stmt_free(stmt
);
425 return isl_ast_node_free(node
);
428 /* Set *depth (initialized to 0 by the caller) to the maximum
429 * of the schedule depths of the leaf nodes for which this function is called.
431 static isl_bool
update_depth(__isl_keep isl_schedule_node
*node
, void *user
)
436 if (isl_schedule_node_get_type(node
) != isl_schedule_node_leaf
)
437 return isl_bool_true
;
438 node_depth
= isl_schedule_node_get_schedule_depth(node
);
439 if (node_depth
> *depth
)
442 return isl_bool_false
;
445 /* This function is called for each node in a CPU AST.
446 * In case of a user node, print the macro definitions required
447 * for printing the AST expressions in the annotation, if any.
448 * For other nodes, return true such that descendants are also
451 * In particular, print the macro definitions needed for the substitutions
452 * of the original user statements.
454 static isl_bool
at_node(__isl_keep isl_ast_node
*node
, void *user
)
456 struct ppcg_stmt
*stmt
;
458 isl_printer
**p
= user
;
460 if (isl_ast_node_get_type(node
) != isl_ast_node_user
)
461 return isl_bool_true
;
463 id
= isl_ast_node_get_annotation(node
);
464 stmt
= isl_id_get_user(id
);
468 return isl_bool_error
;
470 *p
= ppcg_print_body_macros(*p
, stmt
->ref2expr
);
472 return isl_bool_error
;
474 return isl_bool_false
;
477 /* Print the required macros for the CPU AST "node" to "p",
478 * including those needed for the user statements inside the AST.
480 static __isl_give isl_printer
*cpu_print_macros(__isl_take isl_printer
*p
,
481 __isl_keep isl_ast_node
*node
)
483 if (isl_ast_node_foreach_descendant_top_down(node
, &at_node
, &p
) < 0)
484 return isl_printer_free(p
);
485 p
= ppcg_print_macros(p
, node
);
489 /* Code generate the scop 'scop' using "schedule"
490 * and print the corresponding C code to 'p'.
492 static __isl_give isl_printer
*print_scop(struct ppcg_scop
*scop
,
493 __isl_take isl_schedule
*schedule
, __isl_take isl_printer
*p
,
494 struct ppcg_options
*options
)
496 isl_ctx
*ctx
= isl_printer_get_ctx(p
);
497 isl_ast_build
*build
;
498 isl_ast_print_options
*print_options
;
500 isl_id_list
*iterators
;
501 struct ast_build_userinfo build_info
;
505 if (isl_schedule_foreach_schedule_node_top_down(schedule
, &update_depth
,
509 build
= isl_ast_build_alloc(ctx
);
510 iterators
= ppcg_scop_generate_names(scop
, depth
, "c");
511 build
= isl_ast_build_set_iterators(build
, iterators
);
512 build
= isl_ast_build_set_at_each_domain(build
, &at_each_domain
, scop
);
514 if (options
->openmp
) {
515 build_info
.scop
= scop
;
516 build_info
.in_parallel_for
= 0;
518 build
= isl_ast_build_set_before_each_for(build
,
519 &ast_build_before_for
,
521 build
= isl_ast_build_set_after_each_for(build
,
522 &ast_build_after_for
,
526 tree
= isl_ast_build_node_from_schedule(build
, schedule
);
527 isl_ast_build_free(build
);
529 print_options
= isl_ast_print_options_alloc(ctx
);
530 print_options
= isl_ast_print_options_set_print_user(print_options
,
533 print_options
= isl_ast_print_options_set_print_for(print_options
,
536 p
= cpu_print_macros(p
, tree
);
537 p
= isl_ast_node_print(tree
, p
, print_options
);
539 isl_ast_node_free(tree
);
543 isl_schedule_free(schedule
);
548 /* Tile the band node "node" with tile sizes "sizes" and
549 * mark all members of the resulting tile node as "atomic".
551 static __isl_give isl_schedule_node
*tile(__isl_take isl_schedule_node
*node
,
552 __isl_take isl_multi_val
*sizes
)
554 node
= isl_schedule_node_band_tile(node
, sizes
);
555 node
= ppcg_set_schedule_node_type(node
, isl_ast_loop_atomic
);
560 /* Tile "node", if it is a band node with at least 2 members.
561 * The tile sizes are set from the "tile_size" option.
563 static __isl_give isl_schedule_node
*tile_band(
564 __isl_take isl_schedule_node
*node
, void *user
)
566 struct ppcg_scop
*scop
= user
;
569 isl_multi_val
*sizes
;
571 if (isl_schedule_node_get_type(node
) != isl_schedule_node_band
)
574 n
= isl_schedule_node_band_n_member(node
);
578 space
= isl_schedule_node_band_get_space(node
);
579 sizes
= ppcg_multi_val_from_int(space
, scop
->options
->tile_size
);
581 return tile(node
, sizes
);
584 /* Construct schedule constraints from the dependences in ps
585 * for the purpose of computing a schedule for a CPU.
587 * The proximity constraints are set to the flow dependences.
589 * If live-range reordering is allowed then the conditional validity
590 * constraints are set to the order dependences with the flow dependences
591 * as condition. That is, a live-range (flow dependence) will be either
592 * local to an iteration of a band or all adjacent order dependences
593 * will be respected by the band.
594 * The validity constraints are set to the union of the flow dependences
595 * and the forced dependences, while the coincidence constraints
596 * are set to the union of the flow dependences, the forced dependences and
597 * the order dependences.
599 * If live-range reordering is not allowed, then both the validity
600 * and the coincidence constraints are set to the union of the flow
601 * dependences and the false dependences.
603 * Note that the coincidence constraints are only set when the "openmp"
604 * options is set. Even though the way openmp pragmas are introduced
605 * does not rely on the coincident property of the schedule band members,
606 * the coincidence constraints do affect the way the schedule is constructed,
607 * such that more schedule dimensions should be detected as parallel
608 * by ast_schedule_dim_is_parallel.
609 * Since the order dependences are also taken into account by
610 * ast_schedule_dim_is_parallel, they are also added to
611 * the coincidence constraints. If the openmp handling learns
612 * how to privatize some memory, then the corresponding order
613 * dependences can be removed from the coincidence constraints.
615 static __isl_give isl_schedule_constraints
*construct_cpu_schedule_constraints(
616 struct ppcg_scop
*ps
)
618 isl_schedule_constraints
*sc
;
619 isl_union_map
*validity
, *coincidence
;
621 sc
= isl_schedule_constraints_on_domain(isl_union_set_copy(ps
->domain
));
622 if (ps
->options
->live_range_reordering
) {
623 sc
= isl_schedule_constraints_set_conditional_validity(sc
,
624 isl_union_map_copy(ps
->tagged_dep_flow
),
625 isl_union_map_copy(ps
->tagged_dep_order
));
626 validity
= isl_union_map_copy(ps
->dep_flow
);
627 validity
= isl_union_map_union(validity
,
628 isl_union_map_copy(ps
->dep_forced
));
629 if (ps
->options
->openmp
) {
630 coincidence
= isl_union_map_copy(validity
);
631 coincidence
= isl_union_map_union(coincidence
,
632 isl_union_map_copy(ps
->dep_order
));
635 validity
= isl_union_map_copy(ps
->dep_flow
);
636 validity
= isl_union_map_union(validity
,
637 isl_union_map_copy(ps
->dep_false
));
638 if (ps
->options
->openmp
)
639 coincidence
= isl_union_map_copy(validity
);
641 if (ps
->options
->openmp
)
642 sc
= isl_schedule_constraints_set_coincidence(sc
, coincidence
);
643 sc
= isl_schedule_constraints_set_validity(sc
, validity
);
644 sc
= isl_schedule_constraints_set_proximity(sc
,
645 isl_union_map_copy(ps
->dep_flow
));
650 /* Compute a schedule for the scop "ps".
652 * First derive the appropriate schedule constraints from the dependences
653 * in "ps" and then compute a schedule from those schedule constraints,
654 * possibly grouping statement instances based on the input schedule.
656 static __isl_give isl_schedule
*compute_cpu_schedule(struct ppcg_scop
*ps
)
658 isl_schedule_constraints
*sc
;
659 isl_schedule
*schedule
;
664 sc
= construct_cpu_schedule_constraints(ps
);
666 schedule
= ppcg_compute_schedule(sc
, ps
->schedule
, ps
->options
);
671 /* Compute a new schedule to the scop "ps" if the reschedule option is set.
672 * Otherwise, return a copy of the original schedule.
674 static __isl_give isl_schedule
*optionally_compute_schedule(void *user
)
676 struct ppcg_scop
*ps
= user
;
680 if (!ps
->options
->reschedule
)
681 return isl_schedule_copy(ps
->schedule
);
682 return compute_cpu_schedule(ps
);
685 /* Compute a schedule based on the dependences in "ps" and
686 * tile it if requested by the user.
688 static __isl_give isl_schedule
*get_schedule(struct ppcg_scop
*ps
,
689 struct ppcg_options
*options
)
692 isl_schedule
*schedule
;
697 ctx
= isl_union_set_get_ctx(ps
->domain
);
698 schedule
= ppcg_get_schedule(ctx
, options
,
699 &optionally_compute_schedule
, ps
);
700 if (ps
->options
->tile
)
701 schedule
= isl_schedule_map_schedule_node_bottom_up(schedule
,
707 /* Generate CPU code for the scop "ps" using "schedule" and
708 * print the corresponding C code to "p", including variable declarations.
710 static __isl_give isl_printer
*print_cpu_with_schedule(
711 __isl_take isl_printer
*p
, struct ppcg_scop
*ps
,
712 __isl_take isl_schedule
*schedule
, struct ppcg_options
*options
)
717 p
= isl_printer_start_line(p
);
718 p
= isl_printer_print_str(p
, "/* ppcg generated CPU code */");
719 p
= isl_printer_end_line(p
);
721 p
= isl_printer_start_line(p
);
722 p
= isl_printer_end_line(p
);
724 p
= ppcg_set_macro_names(p
);
725 p
= ppcg_print_exposed_declarations(p
, ps
);
726 hidden
= ppcg_scop_any_hidden_declarations(ps
);
728 p
= ppcg_start_block(p
);
729 p
= ppcg_print_hidden_declarations(p
, ps
);
732 context
= isl_set_copy(ps
->context
);
733 context
= isl_set_from_params(context
);
734 schedule
= isl_schedule_insert_context(schedule
, context
);
735 if (options
->debug
->dump_final_schedule
)
736 isl_schedule_dump(schedule
);
737 p
= print_scop(ps
, schedule
, p
, options
);
739 p
= ppcg_end_block(p
);
744 /* Generate CPU code for the scop "ps" and print the corresponding C code
745 * to "p", including variable declarations.
747 __isl_give isl_printer
*print_cpu(__isl_take isl_printer
*p
,
748 struct ppcg_scop
*ps
, struct ppcg_options
*options
)
750 isl_schedule
*schedule
;
752 schedule
= isl_schedule_copy(ps
->schedule
);
753 return print_cpu_with_schedule(p
, ps
, schedule
, options
);
756 /* Generate CPU code for "scop" and print it to "p".
758 * First obtain a schedule for "scop" and then print code for "scop"
759 * using that schedule.
761 static __isl_give isl_printer
*generate(__isl_take isl_printer
*p
,
762 struct ppcg_scop
*scop
, struct ppcg_options
*options
)
764 isl_schedule
*schedule
;
766 schedule
= get_schedule(scop
, options
);
768 return print_cpu_with_schedule(p
, scop
, schedule
, options
);
771 /* Wrapper around generate for use as a ppcg_transform callback.
773 static __isl_give isl_printer
*print_cpu_wrap(__isl_take isl_printer
*p
,
774 struct ppcg_scop
*scop
, void *user
)
776 struct ppcg_options
*options
= user
;
778 return generate(p
, scop
, options
);
781 /* Transform the code in the file called "input" by replacing
782 * all scops by corresponding CPU code and write the results to a file
785 int generate_cpu(isl_ctx
*ctx
, struct ppcg_options
*options
,
786 const char *input
, const char *output
)
791 output_file
= get_output_file(input
, output
);
795 r
= ppcg_transform(ctx
, input
, output_file
, options
,
796 &print_cpu_wrap
, options
);