Remove variable that is never used
[ppcg.git] / cpu.c
blob 03ea71dba341796c48ae98ebdf2d468ce157e386
/*
 * Copyright 2012 INRIA Paris-Rocquencourt
 * Copyright 2012 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Tobias Grosser, INRIA Paris-Rocquencourt,
 * Domaine de Voluceau, Rocquencourt, B.P. 105,
 * 78153 Le Chesnay Cedex France
 * and Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>

#include <isl/aff.h>
#include <isl/ctx.h>
#include <isl/flow.h>
#include <isl/map.h>
#include <isl/ast_build.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <pet.h>

#include "ppcg.h"
#include "ppcg_options.h"
#include "cpu.h"
#include "print.h"
#include "schedule.h"
#include "util.h"
/* Representation of a statement inside a generated AST.
 *
 * "stmt" refers to the original statement.
 * "ref2expr" maps the reference identifier of each access in
 * the statement to an AST expression that should be printed
 * at the place of the access.
 */
struct ppcg_stmt {
	struct pet_stmt *stmt;

	isl_id_to_ast_expr *ref2expr;
};
static void ppcg_stmt_free(void *user)
{
	struct ppcg_stmt *stmt = user;

	if (!stmt)
		return;

	isl_id_to_ast_expr_free(stmt->ref2expr);

	free(stmt);
}
/* Derive the output file name from the input file name.
 * 'input' is the entire path of the input file. The output
 * is the file name plus the additional extension.
 *
 * We insert ".ppcg" before the (last) extension of the input file,
 * so "file.c" becomes "file.ppcg.c".
 */
static FILE *get_output_file(const char *input, const char *output)
{
	char name[PATH_MAX];
	const char *ext;
	const char ppcg_marker[] = ".ppcg";
	int len;
	FILE *file;

	len = ppcg_extract_base_name(name, input);

	strcpy(name + len, ppcg_marker);
	ext = strrchr(input, '.');
	strcpy(name + len + sizeof(ppcg_marker) - 1, ext ? ext : ".c");

	if (!output)
		output = name;

	file = fopen(output, "w");
	if (!file) {
		fprintf(stderr, "Unable to open '%s' for writing\n", output);
		return NULL;
	}

	return file;
}
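
/* Illustrative example (not part of the original source, and assuming
 * ppcg_extract_base_name() strips both the directory and the extension):
 * get_output_file("path/to/file.c", NULL) would open "file.ppcg.c" in the
 * current directory, while an explicit "output" argument is used verbatim.
 */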
/* Data used to annotate the "for" nodes in the AST.
 */
struct ast_node_userinfo {
	/* The for node is an openmp parallel for node. */
	int is_openmp;
};

/* Information used while building the AST.
 */
struct ast_build_userinfo {
	/* The current ppcg scop. */
	struct ppcg_scop *scop;

	/* Are we currently in a parallel for loop? */
	int in_parallel_for;
};
/* Check if the current scheduling dimension is parallel.
 *
 * We check for parallelism by verifying that the loop does not carry any
 * dependences.
 * If the live_range_reordering option is set, then this currently
 * includes the order dependences.  In principle, non-zero order dependences
 * could be allowed, but this would require privatization and/or expansion.
 *
 * Parallelism test: if the distance is zero in all outer dimensions, then it
 * has to be zero in the current dimension as well.
 * Implementation: first, translate dependences into time space, then force
 * outer dimensions to be equal.  If the distance is zero in the current
 * dimension, then the loop is parallel.
 * The distance is zero in the current dimension if it is a subset of a map
 * with equal values for the current dimension.
 */
static int ast_schedule_dim_is_parallel(__isl_keep isl_ast_build *build,
	struct ppcg_scop *scop)
{
	isl_union_map *schedule, *deps;
	isl_map *schedule_deps, *test;
	isl_space *schedule_space;
	unsigned i, dimension, is_parallel;

	schedule = isl_ast_build_get_schedule(build);
	schedule_space = isl_ast_build_get_schedule_space(build);

	dimension = isl_space_dim(schedule_space, isl_dim_out) - 1;

	deps = isl_union_map_copy(scop->dep_flow);
	deps = isl_union_map_union(deps, isl_union_map_copy(scop->dep_false));
	if (scop->options->live_range_reordering) {
		isl_union_map *order = isl_union_map_copy(scop->dep_order);
		deps = isl_union_map_union(deps, order);
	}
	deps = isl_union_map_apply_range(deps, isl_union_map_copy(schedule));
	deps = isl_union_map_apply_domain(deps, schedule);

	if (isl_union_map_is_empty(deps)) {
		isl_union_map_free(deps);
		isl_space_free(schedule_space);
		return 1;
	}

	schedule_deps = isl_map_from_union_map(deps);

	for (i = 0; i < dimension; i++)
		schedule_deps = isl_map_equate(schedule_deps, isl_dim_out, i,
					       isl_dim_in, i);

	test = isl_map_universe(isl_map_get_space(schedule_deps));
	test = isl_map_equate(test, isl_dim_out, dimension, isl_dim_in,
			      dimension);
	is_parallel = isl_map_is_subset(schedule_deps, test);

	isl_space_free(schedule_space);
	isl_map_free(test);
	isl_map_free(schedule_deps);

	return is_parallel;
}
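
/* Illustrative example (not part of the original source): with an identity
 * schedule over a 2D loop nest, a dependence { S[i,j] -> S[i,j+1] } still
 * relates different values of the innermost schedule dimension after the
 * outer dimensions have been equated, so the subset test fails and the
 * inner loop is not parallel.  A dependence { S[i,j] -> S[i+1,j] } becomes
 * empty once the outer dimensions are equated, so the subset test succeeds
 * and the inner loop is parallel.
 */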
/* Mark a for node as openmp parallel if it is the outermost parallel
 * for node.
 */
static void mark_openmp_parallel(__isl_keep isl_ast_build *build,
	struct ast_build_userinfo *build_info,
	struct ast_node_userinfo *node_info)
{
	if (build_info->in_parallel_for)
		return;

	if (ast_schedule_dim_is_parallel(build, build_info->scop)) {
		build_info->in_parallel_for = 1;
		node_info->is_openmp = 1;
	}
}
/* Allocate an ast_node_userinfo structure and initialize it with default
 * values.
 */
static struct ast_node_userinfo *allocate_ast_node_userinfo()
{
	struct ast_node_userinfo *node_info;
	node_info = (struct ast_node_userinfo *)
		malloc(sizeof(struct ast_node_userinfo));
	node_info->is_openmp = 0;
	return node_info;
}
/* Free an ast_node_userinfo structure.
 */
static void free_ast_node_userinfo(void *ptr)
{
	struct ast_node_userinfo *info;
	info = (struct ast_node_userinfo *) ptr;
	free(info);
}
/* This method is executed before the construction of a for node. It creates
 * an isl_id that is used to annotate the subsequently generated ast for nodes.
 *
 * In this function we also run the following analyses:
 *
 *	- Detection of openmp parallel loops
 */
static __isl_give isl_id *ast_build_before_for(
	__isl_keep isl_ast_build *build, void *user)
{
	isl_id *id;
	struct ast_build_userinfo *build_info;
	struct ast_node_userinfo *node_info;

	build_info = (struct ast_build_userinfo *) user;
	node_info = allocate_ast_node_userinfo();
	id = isl_id_alloc(isl_ast_build_get_ctx(build), "", node_info);
	id = isl_id_set_free_user(id, free_ast_node_userinfo);

	mark_openmp_parallel(build, build_info, node_info);

	return id;
}
/* This method is executed after the construction of a for node.
 *
 * It performs the following actions:
 *
 *	- Reset the 'in_parallel_for' flag as soon as we leave a for node
 *	  that is marked as openmp parallel.
 */
static __isl_give isl_ast_node *ast_build_after_for(
	__isl_take isl_ast_node *node, __isl_keep isl_ast_build *build,
	void *user)
{
	isl_id *id;
	struct ast_build_userinfo *build_info;
	struct ast_node_userinfo *info;

	id = isl_ast_node_get_annotation(node);
	info = isl_id_get_user(id);

	if (info && info->is_openmp) {
		build_info = (struct ast_build_userinfo *) user;
		build_info->in_parallel_for = 0;
	}

	isl_id_free(id);

	return node;
}
/* Find the element in scop->stmts that has the given "id".
 */
static struct pet_stmt *find_stmt(struct ppcg_scop *scop, __isl_keep isl_id *id)
{
	int i;

	for (i = 0; i < scop->pet->n_stmt; ++i) {
		struct pet_stmt *stmt = scop->pet->stmts[i];
		isl_id *id_i;

		id_i = isl_set_get_tuple_id(stmt->domain);
		isl_id_free(id_i);

		if (id_i == id)
			return stmt;
	}

	isl_die(isl_id_get_ctx(id), isl_error_internal,
		"statement not found", return NULL);
}
/* Print a user statement in the generated AST.
 * The ppcg_stmt has been attached to the node in at_each_domain.
 */
static __isl_give isl_printer *print_user(__isl_take isl_printer *p,
	__isl_take isl_ast_print_options *print_options,
	__isl_keep isl_ast_node *node, void *user)
{
	struct ppcg_stmt *stmt;
	isl_id *id;

	id = isl_ast_node_get_annotation(node);
	stmt = isl_id_get_user(id);
	isl_id_free(id);

	p = pet_stmt_print_body(stmt->stmt, p, stmt->ref2expr);

	isl_ast_print_options_free(print_options);

	return p;
}
/* Print a for loop node as an openmp parallel loop.
 *
 * To print an openmp parallel loop we print a normal for loop, but add
 * "#pragma omp parallel for" in front.
 *
 * Variables that are declared within the body of this for loop are
 * automatically openmp 'private'. Iterators declared outside of the
 * for loop are automatically openmp 'shared'. As ppcg declares all iterators
 * at the position where they are assigned, there is no need to explicitly mark
 * variables. Their automatically assigned type is already correct.
 *
 * This function only generates valid OpenMP code if the AST was generated
 * with the 'atomic-bounds' option enabled.
 */
static __isl_give isl_printer *print_for_with_openmp(
	__isl_keep isl_ast_node *node, __isl_take isl_printer *p,
	__isl_take isl_ast_print_options *print_options)
{
	p = isl_printer_start_line(p);
	p = isl_printer_print_str(p, "#pragma omp parallel for");
	p = isl_printer_end_line(p);

	p = isl_ast_node_for_print(node, p, print_options);

	return p;
}
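
/* Illustrative example (not part of the original source): for a loop that
 * was marked as openmp parallel, the printed output has the shape
 *
 *	#pragma omp parallel for
 *	for (int c0 = 0; c0 < n; c0 += 1)
 *		...
 *
 * where the exact bounds and body depend on the schedule and on the
 * atomic-bounds option mentioned above.
 */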
/* Print a for node.
 *
 * Depending on how the node is annotated, we either print a normal
 * for node or an openmp parallel for node.
 */
static __isl_give isl_printer *print_for(__isl_take isl_printer *p,
	__isl_take isl_ast_print_options *print_options,
	__isl_keep isl_ast_node *node, void *user)
{
	isl_id *id;
	int openmp;

	openmp = 0;
	id = isl_ast_node_get_annotation(node);

	if (id) {
		struct ast_node_userinfo *info;

		info = (struct ast_node_userinfo *) isl_id_get_user(id);
		if (info && info->is_openmp)
			openmp = 1;
	}

	if (openmp)
		p = print_for_with_openmp(node, p, print_options);
	else
		p = isl_ast_node_for_print(node, p, print_options);

	isl_id_free(id);

	return p;
}
/* Index transformation callback for pet_stmt_build_ast_exprs.
 *
 * "index" expresses the array indices in terms of statement iterators
 * "iterator_map" expresses the statement iterators in terms of
 * AST loop iterators.
 *
 * The result expresses the array indices in terms of
 * AST loop iterators.
 */
static __isl_give isl_multi_pw_aff *pullback_index(
	__isl_take isl_multi_pw_aff *index, __isl_keep isl_id *id, void *user)
{
	isl_pw_multi_aff *iterator_map = user;

	iterator_map = isl_pw_multi_aff_copy(iterator_map);
	return isl_multi_pw_aff_pullback_pw_multi_aff(index, iterator_map);
}
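
/* Illustrative example (not part of the original source): if an access has
 * index expression { S[i,j] -> A[i+j] } and "iterator_map" is
 * { [c0,c1] -> S[c1,c0] }, then the pullback produces
 * { [c0,c1] -> A[c1+c0] }, i.e., the same access expressed in terms of
 * the AST loop iterators c0 and c1.
 */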
/* Transform the accesses in the statement associated with the domain
 * called by "node" to refer to the AST loop iterators, construct
 * corresponding AST expressions using "build",
 * collect them in a ppcg_stmt and annotate the node with the ppcg_stmt.
 */
static __isl_give isl_ast_node *at_each_domain(__isl_take isl_ast_node *node,
	__isl_keep isl_ast_build *build, void *user)
{
	struct ppcg_scop *scop = user;
	isl_ast_expr *expr, *arg;
	isl_ctx *ctx;
	isl_id *id;
	isl_map *map;
	isl_pw_multi_aff *iterator_map;
	struct ppcg_stmt *stmt;

	ctx = isl_ast_node_get_ctx(node);
	stmt = isl_calloc_type(ctx, struct ppcg_stmt);
	if (!stmt)
		goto error;

	expr = isl_ast_node_user_get_expr(node);
	arg = isl_ast_expr_get_op_arg(expr, 0);
	isl_ast_expr_free(expr);
	id = isl_ast_expr_get_id(arg);
	isl_ast_expr_free(arg);
	stmt->stmt = find_stmt(scop, id);
	isl_id_free(id);
	if (!stmt->stmt)
		goto error;

	map = isl_map_from_union_map(isl_ast_build_get_schedule(build));
	map = isl_map_reverse(map);
	iterator_map = isl_pw_multi_aff_from_map(map);
	stmt->ref2expr = pet_stmt_build_ast_exprs(stmt->stmt, build,
		&pullback_index, iterator_map, NULL, NULL);
	isl_pw_multi_aff_free(iterator_map);

	id = isl_id_alloc(isl_ast_node_get_ctx(node), NULL, stmt);
	id = isl_id_set_free_user(id, &ppcg_stmt_free);
	return isl_ast_node_set_annotation(node, id);
error:
	ppcg_stmt_free(stmt);
	return isl_ast_node_free(node);
}
/* Set *depth (initialized to 0 by the caller) to the maximum
 * of the schedule depths of the leaf nodes for which this function is called.
 */
static isl_bool update_depth(__isl_keep isl_schedule_node *node, void *user)
{
	int *depth = user;
	int node_depth;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
		return isl_bool_true;
	node_depth = isl_schedule_node_get_schedule_depth(node);
	if (node_depth > *depth)
		*depth = node_depth;

	return isl_bool_false;
}
/* This function is called for each node in a CPU AST.
 * In case of a user node, print the macro definitions required
 * for printing the AST expressions in the annotation, if any.
 * For other nodes, return true such that descendants are also
 * visited.
 *
 * In particular, print the macro definitions needed for the substitutions
 * of the original user statements.
 */
static isl_bool at_node(__isl_keep isl_ast_node *node, void *user)
{
	struct ppcg_stmt *stmt;
	isl_id *id;
	isl_printer **p = user;

	if (isl_ast_node_get_type(node) != isl_ast_node_user)
		return isl_bool_true;

	id = isl_ast_node_get_annotation(node);
	stmt = isl_id_get_user(id);
	isl_id_free(id);

	if (!stmt)
		return isl_bool_error;

	*p = ppcg_print_body_macros(*p, stmt->ref2expr);
	if (!*p)
		return isl_bool_error;

	return isl_bool_false;
}
/* Print the required macros for the CPU AST "node" to "p",
 * including those needed for the user statements inside the AST.
 */
static __isl_give isl_printer *cpu_print_macros(__isl_take isl_printer *p,
	__isl_keep isl_ast_node *node)
{
	if (isl_ast_node_foreach_descendant_top_down(node, &at_node, &p) < 0)
		return isl_printer_free(p);
	p = ppcg_print_macros(p, node);
	return p;
}
/* Code generate the scop 'scop' using "schedule"
 * and print the corresponding C code to 'p'.
 */
static __isl_give isl_printer *print_scop(struct ppcg_scop *scop,
	__isl_take isl_schedule *schedule, __isl_take isl_printer *p,
	struct ppcg_options *options)
{
	isl_ctx *ctx = isl_printer_get_ctx(p);
	isl_ast_build *build;
	isl_ast_print_options *print_options;
	isl_ast_node *tree;
	isl_id_list *iterators;
	struct ast_build_userinfo build_info;
	int depth;

	depth = 0;
	if (isl_schedule_foreach_schedule_node_top_down(schedule, &update_depth,
							&depth) < 0)
		goto error;

	build = isl_ast_build_alloc(ctx);
	iterators = ppcg_scop_generate_names(scop, depth, "c");
	build = isl_ast_build_set_iterators(build, iterators);
	build = isl_ast_build_set_at_each_domain(build, &at_each_domain, scop);

	if (options->openmp) {
		build_info.scop = scop;
		build_info.in_parallel_for = 0;

		build = isl_ast_build_set_before_each_for(build,
							&ast_build_before_for,
							&build_info);
		build = isl_ast_build_set_after_each_for(build,
							&ast_build_after_for,
							&build_info);
	}

	tree = isl_ast_build_node_from_schedule(build, schedule);
	isl_ast_build_free(build);

	print_options = isl_ast_print_options_alloc(ctx);
	print_options = isl_ast_print_options_set_print_user(print_options,
							&print_user, NULL);
	print_options = isl_ast_print_options_set_print_for(print_options,
							&print_for, NULL);

	p = cpu_print_macros(p, tree);
	p = isl_ast_node_print(tree, p, print_options);

	isl_ast_node_free(tree);

	return p;
error:
	isl_schedule_free(schedule);
	isl_printer_free(p);
	return NULL;
}
/* Tile the band node "node" with tile sizes "sizes" and
 * mark all members of the resulting tile node as "atomic".
 */
static __isl_give isl_schedule_node *tile(__isl_take isl_schedule_node *node,
	__isl_take isl_multi_val *sizes)
{
	node = isl_schedule_node_band_tile(node, sizes);
	node = ppcg_set_schedule_node_type(node, isl_ast_loop_atomic);

	return node;
}
/* Tile "node", if it is a band node with at least 2 members.
 * The tile sizes are set from the "tile_size" option.
 */
static __isl_give isl_schedule_node *tile_band(
	__isl_take isl_schedule_node *node, void *user)
{
	struct ppcg_scop *scop = user;
	int n;
	isl_space *space;
	isl_multi_val *sizes;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
		return node;

	n = isl_schedule_node_band_n_member(node);
	if (n <= 1)
		return node;

	space = isl_schedule_node_band_get_space(node);
	sizes = ppcg_multi_val_from_int(space, scop->options->tile_size);

	return tile(node, sizes);
}
/* Construct schedule constraints from the dependences in ps
 * for the purpose of computing a schedule for a CPU.
 *
 * The proximity constraints are set to the flow dependences.
 *
 * If live-range reordering is allowed, then the conditional validity
 * constraints are set to the order dependences with the flow dependences
 * as condition.  That is, a live-range (flow dependence) will either be
 * local to an iteration of a band or all adjacent order dependences
 * will be respected by the band.
 * The validity constraints are set to the union of the flow dependences
 * and the forced dependences, while the coincidence constraints
 * are set to the union of the flow dependences, the forced dependences and
 * the order dependences.
 *
 * If live-range reordering is not allowed, then both the validity
 * and the coincidence constraints are set to the union of the flow
 * dependences and the false dependences.
 *
 * Note that the coincidence constraints are only set when the "openmp"
 * option is set.  Even though the way openmp pragmas are introduced
 * does not rely on the coincident property of the schedule band members,
 * the coincidence constraints do affect the way the schedule is constructed,
 * such that more schedule dimensions should be detected as parallel
 * by ast_schedule_dim_is_parallel.
 * Since the order dependences are also taken into account by
 * ast_schedule_dim_is_parallel, they are also added to
 * the coincidence constraints.  If the openmp handling learns
 * how to privatize some memory, then the corresponding order
 * dependences can be removed from the coincidence constraints.
 */
static __isl_give isl_schedule_constraints *construct_cpu_schedule_constraints(
	struct ppcg_scop *ps)
{
	isl_schedule_constraints *sc;
	isl_union_map *validity, *coincidence;

	sc = isl_schedule_constraints_on_domain(isl_union_set_copy(ps->domain));
	if (ps->options->live_range_reordering) {
		sc = isl_schedule_constraints_set_conditional_validity(sc,
				isl_union_map_copy(ps->tagged_dep_flow),
				isl_union_map_copy(ps->tagged_dep_order));
		validity = isl_union_map_copy(ps->dep_flow);
		validity = isl_union_map_union(validity,
				isl_union_map_copy(ps->dep_forced));
		if (ps->options->openmp) {
			coincidence = isl_union_map_copy(validity);
			coincidence = isl_union_map_union(coincidence,
					isl_union_map_copy(ps->dep_order));
		}
	} else {
		validity = isl_union_map_copy(ps->dep_flow);
		validity = isl_union_map_union(validity,
				isl_union_map_copy(ps->dep_false));
		if (ps->options->openmp)
			coincidence = isl_union_map_copy(validity);
	}
	if (ps->options->openmp)
		sc = isl_schedule_constraints_set_coincidence(sc, coincidence);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc,
		isl_union_map_copy(ps->dep_flow));

	return sc;
}
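
/* Informal summary of the constraints set above (not part of the original
 * source):
 *
 *	- with live-range reordering: validity = flow + forced,
 *	  coincidence (openmp only) = flow + forced + order,
 *	  conditional validity = tagged order under tagged flow;
 *	- without live-range reordering: validity = coincidence (openmp only)
 *	  = flow + false.
 *
 * In both cases the proximity constraints are the flow dependences.
 */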
/* Compute a schedule for the scop "ps".
 *
 * First derive the appropriate schedule constraints from the dependences
 * in "ps" and then compute a schedule from those schedule constraints.
 */
static __isl_give isl_schedule *compute_cpu_schedule(struct ppcg_scop *ps)
{
	isl_schedule_constraints *sc;
	isl_schedule *schedule;

	if (!ps)
		return NULL;

	sc = construct_cpu_schedule_constraints(ps);

	if (ps->options->debug->dump_schedule_constraints)
		isl_schedule_constraints_dump(sc);
	schedule = isl_schedule_constraints_compute_schedule(sc);

	return schedule;
}
/* Compute a new schedule for the scop "ps" if the reschedule option is set.
 * Otherwise, return a copy of the original schedule.
 */
static __isl_give isl_schedule *optionally_compute_schedule(void *user)
{
	struct ppcg_scop *ps = user;

	if (!ps)
		return NULL;
	if (!ps->options->reschedule)
		return isl_schedule_copy(ps->schedule);
	return compute_cpu_schedule(ps);
}
/* Compute a schedule based on the dependences in "ps" and
 * tile it if requested by the user.
 */
static __isl_give isl_schedule *get_schedule(struct ppcg_scop *ps,
	struct ppcg_options *options)
{
	isl_ctx *ctx;
	isl_schedule *schedule;

	if (!ps)
		return NULL;

	ctx = isl_union_set_get_ctx(ps->domain);
	schedule = ppcg_get_schedule(ctx, options,
				&optionally_compute_schedule, ps);
	if (ps->options->tile)
		schedule = isl_schedule_map_schedule_node_bottom_up(schedule,
							&tile_band, ps);

	return schedule;
}
/* Generate CPU code for the scop "ps" using "schedule" and
 * print the corresponding C code to "p", including variable declarations.
 */
static __isl_give isl_printer *print_cpu_with_schedule(
	__isl_take isl_printer *p, struct ppcg_scop *ps,
	__isl_take isl_schedule *schedule, struct ppcg_options *options)
{
	int hidden;
	isl_set *context;

	p = isl_printer_start_line(p);
	p = isl_printer_print_str(p, "/* ppcg generated CPU code */");
	p = isl_printer_end_line(p);

	p = isl_printer_start_line(p);
	p = isl_printer_end_line(p);

	p = ppcg_set_macro_names(p);
	p = ppcg_print_exposed_declarations(p, ps);
	hidden = ppcg_scop_any_hidden_declarations(ps);
	if (hidden) {
		p = ppcg_start_block(p);
		p = ppcg_print_hidden_declarations(p, ps);
	}

	context = isl_set_copy(ps->context);
	context = isl_set_from_params(context);
	schedule = isl_schedule_insert_context(schedule, context);
	if (options->debug->dump_final_schedule)
		isl_schedule_dump(schedule);
	p = print_scop(ps, schedule, p, options);
	if (hidden)
		p = ppcg_end_block(p);

	return p;
}
/* Generate CPU code for the scop "ps" and print the corresponding C code
 * to "p", including variable declarations.
 */
__isl_give isl_printer *print_cpu(__isl_take isl_printer *p,
	struct ppcg_scop *ps, struct ppcg_options *options)
{
	isl_schedule *schedule;

	schedule = isl_schedule_copy(ps->schedule);
	return print_cpu_with_schedule(p, ps, schedule, options);
}
/* Generate CPU code for "scop" and print it to "p".
 *
 * First obtain a schedule for "scop" and then print code for "scop"
 * using that schedule.
 */
static __isl_give isl_printer *generate(__isl_take isl_printer *p,
	struct ppcg_scop *scop, struct ppcg_options *options)
{
	isl_schedule *schedule;

	schedule = get_schedule(scop, options);

	return print_cpu_with_schedule(p, scop, schedule, options);
}
/* Wrapper around generate for use as a ppcg_transform callback.
 */
static __isl_give isl_printer *print_cpu_wrap(__isl_take isl_printer *p,
	struct ppcg_scop *scop, void *user)
{
	struct ppcg_options *options = user;

	return generate(p, scop, options);
}
/* Transform the code in the file called "input" by replacing
 * all scops by corresponding CPU code and write the results to a file
 * called "output".
 */
int generate_cpu(isl_ctx *ctx, struct ppcg_options *options,
	const char *input, const char *output)
{
	FILE *output_file;
	int r;

	output_file = get_output_file(input, output);
	if (!output_file)
		return -1;

	r = ppcg_transform(ctx, input, output_file, options,
			&print_cpu_wrap, options);

	fclose(output_file);

	return r;
}