/* Translation of isl AST to Gimple.
   Copyright (C) 2014-2019 Free Software Foundation, Inc.
   Contributed by Roman Gareev <gareevroman@gmail.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop.h"
#include "tree-ssa-operands.h"
#include "tree-ssa-propagate.h"
#include "tree-pass.h"
#include "tree-data-ref.h"
#include "tree-ssa-loop-manip.h"
#include "tree-scalar-evolution.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "tree-into-ssa.h"
#include "ssa-iterators.h"
#include "gimple-pretty-print.h"
#include "value-prof.h"
#include "tree-vectorizer.h"
struct ast_build_info
{
  ast_build_info ()
    : is_parallelizable (false)
  { }

  bool is_parallelizable;
};
/* IVS_PARAMS maps isl's scattering and parameter identifiers
   to corresponding trees.  */

typedef std::map<isl_id *, tree> ivs_params;
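/* Typically, isl names the generated loop iterators "c0", "c1", ...;
   graphite_create_new_loop records the pair (isl_id of the iterator, new
   Gimple induction variable) here, add_parameters_to_ivs_params records one
   entry per SESE parameter, and ivs_params_clear releases the isl_id keys.  */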
/* Free all memory allocated for isl's identifiers.  */

static void ivs_params_clear (ivs_params &ip)
{
  std::map<isl_id *, tree>::iterator it;
  for (it = ip.begin ();
       it != ip.end (); it++)
    {
      isl_id_free (it->first);
    }
}
/* Set the "separate" option for the schedule node.  */

static isl_schedule_node *
set_separate_option (__isl_take isl_schedule_node *node, void *user)
{
  if (isl_schedule_node_get_type (node) != isl_schedule_node_band)
    return node;

  /* Set the "separate" option unless it is set earlier to another option.  */
  if (isl_schedule_node_band_member_get_ast_loop_type (node, 0)
      == isl_ast_loop_default)
    return isl_schedule_node_band_member_set_ast_loop_type
      (node, 0, isl_ast_loop_separate);

  return node;
}
/* Print SCHEDULE under an AST form on file F.  */

void
print_schedule_ast (FILE *f, __isl_keep isl_schedule *schedule, scop_p scop)
{
  isl_set *set = isl_set_params (isl_set_copy (scop->param_context));
  isl_ast_build *context = isl_ast_build_from_context (set);
  isl_ast_node *ast
    = isl_ast_build_node_from_schedule (context, isl_schedule_copy (schedule));
  isl_ast_build_free (context);
  print_isl_ast (f, ast);
  isl_ast_node_free (ast);
}
DEBUG_FUNCTION void
debug_schedule_ast (__isl_keep isl_schedule *s, scop_p scop)
{
  print_schedule_ast (stderr, s, scop);
}
class translate_isl_ast_to_gimple
{
 public:
  translate_isl_ast_to_gimple (sese_info_p r);

  edge translate_isl_ast (loop_p context_loop, __isl_keep isl_ast_node *node,
                          edge next_e, ivs_params &ip);
  edge translate_isl_ast_node_for (loop_p context_loop,
                                   __isl_keep isl_ast_node *node,
                                   edge next_e, ivs_params &ip);
  edge translate_isl_ast_for_loop (loop_p context_loop,
                                   __isl_keep isl_ast_node *node_for,
                                   edge next_e,
                                   tree type, tree lb, tree ub,
                                   ivs_params &ip);
  edge translate_isl_ast_node_if (loop_p context_loop,
                                  __isl_keep isl_ast_node *node,
                                  edge next_e, ivs_params &ip);
  edge translate_isl_ast_node_user (__isl_keep isl_ast_node *node,
                                    edge next_e, ivs_params &ip);
  edge translate_isl_ast_node_block (loop_p context_loop,
                                     __isl_keep isl_ast_node *node,
                                     edge next_e, ivs_params &ip);
  tree unary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
                         ivs_params &ip);
  tree binary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
                          ivs_params &ip);
  tree ternary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
                           ivs_params &ip);
  tree nary_op_to_tree (tree type, __isl_take isl_ast_expr *expr,
                        ivs_params &ip);
  tree gcc_expression_from_isl_expression (tree type,
                                           __isl_take isl_ast_expr *,
                                           ivs_params &ip);
  tree gcc_expression_from_isl_ast_expr_id (tree type,
                                            __isl_keep isl_ast_expr *expr_id,
                                            ivs_params &ip);
  widest_int widest_int_from_isl_expr_int (__isl_keep isl_ast_expr *expr);
  tree gcc_expression_from_isl_expr_int (tree type,
                                         __isl_take isl_ast_expr *expr);
  tree gcc_expression_from_isl_expr_op (tree type,
                                        __isl_take isl_ast_expr *expr,
                                        ivs_params &ip);
  struct loop *graphite_create_new_loop (edge entry_edge,
                                         __isl_keep isl_ast_node *node_for,
                                         loop_p outer, tree type,
                                         tree lb, tree ub, ivs_params &ip);
  edge graphite_create_new_guard (edge entry_edge,
                                  __isl_take isl_ast_expr *if_cond,
                                  ivs_params &ip);
  void build_iv_mapping (vec<tree> iv_map, gimple_poly_bb_p gbb,
                         __isl_keep isl_ast_expr *user_expr, ivs_params &ip,
                         sese_l &region);
  void add_parameters_to_ivs_params (scop_p scop, ivs_params &ip);
  __isl_give isl_ast_build *generate_isl_context (scop_p scop);

  __isl_give isl_ast_node * scop_to_isl_ast (scop_p scop);

  tree get_rename_from_scev (tree old_name, gimple_seq *stmts, loop_p loop,
                             vec<tree> iv_map);
  void graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb,
                                       vec<tree> iv_map);
  edge copy_bb_and_scalar_dependences (basic_block bb, edge next_e,
                                       vec<tree> iv_map);
  void set_rename (tree old_name, tree expr);
  void gsi_insert_earliest (gimple_seq seq);
  bool codegen_error_p () const { return codegen_error; }

  void set_codegen_error ()
  {
    codegen_error = true;
    gcc_assert (! flag_checking
                || PARAM_VALUE (PARAM_GRAPHITE_ALLOW_CODEGEN_ERRORS));
  }

  bool is_constant (tree op) const
  {
    return TREE_CODE (op) == INTEGER_CST
      || TREE_CODE (op) == REAL_CST
      || TREE_CODE (op) == COMPLEX_CST
      || TREE_CODE (op) == VECTOR_CST;
  }

 private:
  /* The region to be translated.  */
  sese_info_p region;

  /* This flag is set when an error occurred during the translation of isl AST
     to Gimple.  */
  bool codegen_error;

  /* A vector of all the edges at if_condition merge points.  */
  auto_vec<edge, 2> merge_points;

  tree graphite_expr_type;
};
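/* Usage note: graphite_regenerate_ast_isl at the end of this file is the
   entry point; it instantiates this class once per SCoP, builds the isl AST
   with scop_to_isl_ast, and walks it with translate_isl_ast.  */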
translate_isl_ast_to_gimple::translate_isl_ast_to_gimple (sese_info_p r)
  : region (r), codegen_error (false)
{
  /* We always try to use signed 128 bit types, but fall back to smaller types
     in case a platform does not provide types of these sizes.  In the future
     we should use isl to derive the optimal type for each subexpression.  */
  int max_mode_int_precision
    = GET_MODE_PRECISION (int_mode_for_size (MAX_FIXED_MODE_SIZE, 0).require ());
  int graphite_expr_type_precision
    = 128 <= max_mode_int_precision ? 128 : max_mode_int_precision;
  graphite_expr_type
    = build_nonstandard_integer_type (graphite_expr_type_precision, 0);
}
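/* For example, on a target whose widest fixed-size integer mode is DImode,
   max_mode_int_precision is 64 and graphite_expr_type becomes a signed
   64-bit integer type; the trailing 0 passed to
   build_nonstandard_integer_type requests a signed type.  */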
/* Return the tree variable that corresponds to the given isl ast identifier
   expression (an isl_ast_expr of type isl_ast_expr_id).

   FIXME: We should replace blind conversion of id's type with derivation
   of the optimal type when we get the corresponding isl support.  Blindly
   converting type sizes may be problematic when we switch to smaller
   types.  */

tree translate_isl_ast_to_gimple::
gcc_expression_from_isl_ast_expr_id (tree type,
                                     __isl_take isl_ast_expr *expr_id,
                                     ivs_params &ip)
{
  gcc_assert (isl_ast_expr_get_type (expr_id) == isl_ast_expr_id);
  isl_id *tmp_isl_id = isl_ast_expr_get_id (expr_id);
  std::map<isl_id *, tree>::iterator res;
  res = ip.find (tmp_isl_id);
  isl_id_free (tmp_isl_id);
  gcc_assert (res != ip.end () &&
              "Could not map isl_id to tree expression");
  isl_ast_expr_free (expr_id);
  tree t = res->second;
  if (useless_type_conversion_p (type, TREE_TYPE (t)))
    return t;
  return fold_convert (type, t);
}
/* Converts an isl_ast_expr_int expression E to a widest_int.
   Raises a code generation error when the constant doesn't fit.  */

widest_int translate_isl_ast_to_gimple::
widest_int_from_isl_expr_int (__isl_keep isl_ast_expr *expr)
{
  gcc_assert (isl_ast_expr_get_type (expr) == isl_ast_expr_int);
  isl_val *val = isl_ast_expr_get_val (expr);
  size_t n = isl_val_n_abs_num_chunks (val, sizeof (HOST_WIDE_INT));
  HOST_WIDE_INT *chunks = XALLOCAVEC (HOST_WIDE_INT, n);
  if (n > WIDE_INT_MAX_ELTS
      || isl_val_get_abs_num_chunks (val, sizeof (HOST_WIDE_INT), chunks) == -1)
    {
      isl_val_free (val);
      set_codegen_error ();
      return 0;
    }

  widest_int wi = widest_int::from_array (chunks, n, true);
  if (isl_val_is_neg (val))
    wi = -wi;
  isl_val_free (val);
  return wi;
}
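/* The chunks retrieved above describe the absolute value of the isl
   constant; the sign is re-applied after widest_int::from_array has
   reassembled the magnitude, which is why isl_val_is_neg is checked
   separately.  */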
/* Converts an isl_ast_expr_int expression E to a GCC expression tree of
   type TYPE.  Raises a code generation error when the constant doesn't fit.  */

tree translate_isl_ast_to_gimple::
gcc_expression_from_isl_expr_int (tree type, __isl_take isl_ast_expr *expr)
{
  widest_int wi = widest_int_from_isl_expr_int (expr);
  isl_ast_expr_free (expr);
  if (codegen_error_p ())
    return NULL_TREE;
  if (wi::min_precision (wi, TYPE_SIGN (type)) > TYPE_PRECISION (type))
    {
      set_codegen_error ();
      return NULL_TREE;
    }
  return wide_int_to_tree (type, wi);
}
/* Converts a binary isl_ast_expr_op expression E to a GCC expression tree of
   type TYPE.  */

tree translate_isl_ast_to_gimple::
binary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
{
  enum isl_ast_op_type expr_type = isl_ast_expr_get_op_type (expr);
  isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
  tree tree_lhs_expr = gcc_expression_from_isl_expression (type, arg_expr, ip);
  arg_expr = isl_ast_expr_get_op_arg (expr, 1);
  isl_ast_expr_free (expr);

  /* From our constraint generation we may get modulo operations that
     we cannot represent explicitly but that are no-ops for TYPE.
     Elide those.  */
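  /* For example, with a 128-bit TYPE an expression like "i pdiv_r 2^128"
     reduces to "i": the integer operand is a power of two whose exponent is
     at least TYPE_PRECISION, so only the left-hand side is returned.  */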
  if ((expr_type == isl_ast_op_pdiv_r
       || expr_type == isl_ast_op_zdiv_r
       || expr_type == isl_ast_op_add)
      && isl_ast_expr_get_type (arg_expr) == isl_ast_expr_int
      && (wi::exact_log2 (widest_int_from_isl_expr_int (arg_expr))
          >= TYPE_PRECISION (type)))
    {
      isl_ast_expr_free (arg_expr);
      return tree_lhs_expr;
    }

  tree tree_rhs_expr = gcc_expression_from_isl_expression (type, arg_expr, ip);

  if (codegen_error_p ())
    return NULL_TREE;

  switch (expr_type)
    {
    case isl_ast_op_add:
      return fold_build2 (PLUS_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_sub:
      return fold_build2 (MINUS_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_mul:
      return fold_build2 (MULT_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_div:
      return fold_build2 (EXACT_DIV_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_pdiv_q:
      return fold_build2 (TRUNC_DIV_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_zdiv_r:
    case isl_ast_op_pdiv_r:
      return fold_build2 (TRUNC_MOD_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_fdiv_q:
      return fold_build2 (FLOOR_DIV_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_and:
      return fold_build2 (TRUTH_ANDIF_EXPR, type,
                          tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_or:
      return fold_build2 (TRUTH_ORIF_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_eq:
      return fold_build2 (EQ_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_le:
      return fold_build2 (LE_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_lt:
      return fold_build2 (LT_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_ge:
      return fold_build2 (GE_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    case isl_ast_op_gt:
      return fold_build2 (GT_EXPR, type, tree_lhs_expr, tree_rhs_expr);

    default:
      gcc_unreachable ();
    }
}
/* Converts a ternary isl_ast_expr_op expression E to a GCC expression tree of
   type TYPE.  */

tree translate_isl_ast_to_gimple::
ternary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
{
  enum isl_ast_op_type t = isl_ast_expr_get_op_type (expr);
  gcc_assert (t == isl_ast_op_cond || t == isl_ast_op_select);
  isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
  tree a = gcc_expression_from_isl_expression (type, arg_expr, ip);
  arg_expr = isl_ast_expr_get_op_arg (expr, 1);
  tree b = gcc_expression_from_isl_expression (type, arg_expr, ip);
  arg_expr = isl_ast_expr_get_op_arg (expr, 2);
  tree c = gcc_expression_from_isl_expression (type, arg_expr, ip);
  isl_ast_expr_free (expr);

  if (codegen_error_p ())
    return NULL_TREE;

  return fold_build3 (COND_EXPR, type, a,
                      rewrite_to_non_trapping_overflow (b),
                      rewrite_to_non_trapping_overflow (c));
}
/* Converts a unary isl_ast_expr_op expression E to a GCC expression tree of
   type TYPE.  */

tree translate_isl_ast_to_gimple::
unary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
{
  gcc_assert (isl_ast_expr_get_op_type (expr) == isl_ast_op_minus);
  isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
  tree tree_expr = gcc_expression_from_isl_expression (type, arg_expr, ip);
  isl_ast_expr_free (expr);
  return codegen_error_p () ? NULL_TREE
    : fold_build1 (NEGATE_EXPR, type, tree_expr);
}
/* Converts an isl_ast_expr_op expression E with unknown number of arguments
   to a GCC expression tree of type TYPE.  */

tree translate_isl_ast_to_gimple::
nary_op_to_tree (tree type, __isl_take isl_ast_expr *expr, ivs_params &ip)
{
  enum tree_code op_code;
  switch (isl_ast_expr_get_op_type (expr))
    {
    case isl_ast_op_max:
      op_code = MAX_EXPR;
      break;

    case isl_ast_op_min:
      op_code = MIN_EXPR;
      break;

    default:
      gcc_unreachable ();
    }

  isl_ast_expr *arg_expr = isl_ast_expr_get_op_arg (expr, 0);
  tree res = gcc_expression_from_isl_expression (type, arg_expr, ip);

  if (codegen_error_p ())
    {
      isl_ast_expr_free (expr);
      return NULL_TREE;
    }

  int i;
  for (i = 1; i < isl_ast_expr_get_op_n_arg (expr); i++)
    {
      arg_expr = isl_ast_expr_get_op_arg (expr, i);
      tree t = gcc_expression_from_isl_expression (type, arg_expr, ip);

      if (codegen_error_p ())
        {
          isl_ast_expr_free (expr);
          return NULL_TREE;
        }

      res = fold_build2 (op_code, type, res, t);
    }

  isl_ast_expr_free (expr);
  return res;
}
/* Converts an isl_ast_expr_op expression E to a GCC expression tree of
   type TYPE.  */

tree translate_isl_ast_to_gimple::
gcc_expression_from_isl_expr_op (tree type, __isl_take isl_ast_expr *expr,
                                 ivs_params &ip)
{
  if (codegen_error_p ())
    {
      isl_ast_expr_free (expr);
      return NULL_TREE;
    }

  gcc_assert (isl_ast_expr_get_type (expr) == isl_ast_expr_op);
  switch (isl_ast_expr_get_op_type (expr))
    {
    /* These isl ast expressions are not supported yet.  */
    case isl_ast_op_error:
    case isl_ast_op_call:
    case isl_ast_op_and_then:
    case isl_ast_op_or_else:
      gcc_unreachable ();

    case isl_ast_op_max:
    case isl_ast_op_min:
      return nary_op_to_tree (type, expr, ip);

    case isl_ast_op_add:
    case isl_ast_op_sub:
    case isl_ast_op_mul:
    case isl_ast_op_div:
    case isl_ast_op_pdiv_q:
    case isl_ast_op_pdiv_r:
    case isl_ast_op_fdiv_q:
    case isl_ast_op_zdiv_r:
    case isl_ast_op_and:
    case isl_ast_op_or:
    case isl_ast_op_eq:
    case isl_ast_op_le:
    case isl_ast_op_lt:
    case isl_ast_op_ge:
    case isl_ast_op_gt:
      return binary_op_to_tree (type, expr, ip);

    case isl_ast_op_minus:
      return unary_op_to_tree (type, expr, ip);

    case isl_ast_op_cond:
    case isl_ast_op_select:
      return ternary_op_to_tree (type, expr, ip);

    default:
      gcc_unreachable ();
    }

  return NULL_TREE;
}
/* Converts an isl AST expression E back to a GCC expression tree of
   type TYPE.  */

tree translate_isl_ast_to_gimple::
gcc_expression_from_isl_expression (tree type, __isl_take isl_ast_expr *expr,
                                    ivs_params &ip)
{
  if (codegen_error_p ())
    {
      isl_ast_expr_free (expr);
      return NULL_TREE;
    }

  switch (isl_ast_expr_get_type (expr))
    {
    case isl_ast_expr_id:
      return gcc_expression_from_isl_ast_expr_id (type, expr, ip);

    case isl_ast_expr_int:
      return gcc_expression_from_isl_expr_int (type, expr);

    case isl_ast_expr_op:
      return gcc_expression_from_isl_expr_op (type, expr, ip);

    default:
      gcc_unreachable ();
    }

  return NULL_TREE;
}
/* Creates a new LOOP corresponding to isl_ast_node_for.  Inserts an
   induction variable for the new LOOP.  New LOOP is attached to CFG
   starting at ENTRY_EDGE.  LOOP is inserted into the loop tree and
   becomes the child loop of the OUTER_LOOP.  NEWIVS_INDEX binds
   isl's scattering name to the induction variable created for the
   loop of STMT.  The new induction variable is inserted in the NEWIVS
   vector and is of type TYPE.  */

struct loop *translate_isl_ast_to_gimple::
graphite_create_new_loop (edge entry_edge, __isl_keep isl_ast_node *node_for,
                          loop_p outer, tree type, tree lb, tree ub,
                          ivs_params &ip)
{
  isl_ast_expr *for_inc = isl_ast_node_for_get_inc (node_for);
  tree stride = gcc_expression_from_isl_expression (type, for_inc, ip);

  /* To fail code generation, we generate wrong code until we discard it.  */
  if (codegen_error_p ())
    stride = integer_zero_node;

  tree ivvar = create_tmp_var (type, "graphite_IV");
  tree iv, iv_after_increment;
  loop_p loop = create_empty_loop_on_edge
    (entry_edge, lb, stride, ub, ivvar, &iv, &iv_after_increment,
     outer ? outer : entry_edge->src->loop_father);

  isl_ast_expr *for_iterator = isl_ast_node_for_get_iterator (node_for);
  isl_id *id = isl_ast_expr_get_id (for_iterator);
  std::map<isl_id *, tree>::iterator res;
  res = ip.find (id);
  if (ip.count (id))
    isl_id_free (res->first);
  ip[id] = iv;
  isl_ast_expr_free (for_iterator);
  return loop;
}
/* Create the loop for an isl_ast_node_for.

   - NEXT_E is the edge where new generated code should be attached.  */

edge translate_isl_ast_to_gimple::
translate_isl_ast_for_loop (loop_p context_loop,
                            __isl_keep isl_ast_node *node_for, edge next_e,
                            tree type, tree lb, tree ub,
                            ivs_params &ip)
{
  gcc_assert (isl_ast_node_get_type (node_for) == isl_ast_node_for);
  struct loop *loop = graphite_create_new_loop (next_e, node_for, context_loop,
                                                type, lb, ub, ip);
  edge last_e = single_exit (loop);
  edge to_body = single_succ_edge (loop->header);
  basic_block after = to_body->dest;

  /* Translate the body of the loop.  */
  isl_ast_node *for_body = isl_ast_node_for_get_body (node_for);
  next_e = translate_isl_ast (loop, for_body, to_body, ip);
  isl_ast_node_free (for_body);

  /* Early return if we failed to translate loop body.  */
  if (!next_e || codegen_error_p ())
    return NULL;

  if (next_e->dest != after)
    redirect_edge_succ_nodup (next_e, after);
  set_immediate_dominator (CDI_DOMINATORS, next_e->dest, next_e->src);

  if (flag_loop_parallelize_all)
    {
      isl_id *id = isl_ast_node_get_annotation (node_for);
      gcc_assert (id);
      ast_build_info *for_info = (ast_build_info *) isl_id_get_user (id);
      loop->can_be_parallel = for_info->is_parallelizable;
      free (for_info);
      isl_id_free (id);
    }

  return last_e;
}
/* We use this function to get the upper bound because of the form,
   which is used by isl to represent loops:

   for (iterator = init; cond; iterator += inc)
     {
       ...
     }

   The loop condition is an arbitrary expression, which contains the
   current loop iterator.

   (e.g. iterator + 3 < B && C > iterator + A)

   We have to know the upper bound of the iterator to generate a loop
   in Gimple form.  It can be obtained from the special representation
   of the loop condition, which is generated by isl,
   if the ast_build_atomic_upper_bound option is set.  In this case,
   isl generates a loop condition that consists of the current loop
   iterator, + an operator (< or <=) and an expression not involving
   the iterator, which is processed and returned by this function.

   (e.g. iterator <= upper-bound-expression-without-iterator)  */

static __isl_give isl_ast_expr *
get_upper_bound (__isl_keep isl_ast_node *node_for)
{
  gcc_assert (isl_ast_node_get_type (node_for) == isl_ast_node_for);
  isl_ast_expr *for_cond = isl_ast_node_for_get_cond (node_for);
  gcc_assert (isl_ast_expr_get_type (for_cond) == isl_ast_expr_op);
  isl_ast_expr *res;
  switch (isl_ast_expr_get_op_type (for_cond))
    {
    case isl_ast_op_le:
      res = isl_ast_expr_get_op_arg (for_cond, 1);
      break;

    case isl_ast_op_lt:
      {
        /* (iterator < ub) => (iterator <= ub - 1).  */
        isl_val *one =
          isl_val_int_from_si (isl_ast_expr_get_ctx (for_cond), 1);
        isl_ast_expr *ub = isl_ast_expr_get_op_arg (for_cond, 1);
        res = isl_ast_expr_sub (ub, isl_ast_expr_from_val (one));
        break;
      }

    default:
      gcc_unreachable ();
    }

  isl_ast_expr_free (for_cond);
  return res;
}
/* Translates an isl_ast_node_for to Gimple.  */

edge translate_isl_ast_to_gimple::
translate_isl_ast_node_for (loop_p context_loop, __isl_keep isl_ast_node *node,
                            edge next_e, ivs_params &ip)
{
  gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_for);
  tree type = graphite_expr_type;

  isl_ast_expr *for_init = isl_ast_node_for_get_init (node);
  tree lb = gcc_expression_from_isl_expression (type, for_init, ip);
  /* To fail code generation, we generate wrong code until we discard it.  */
  if (codegen_error_p ())
    lb = integer_zero_node;

  isl_ast_expr *upper_bound = get_upper_bound (node);
  tree ub = gcc_expression_from_isl_expression (type, upper_bound, ip);
  /* To fail code generation, we generate wrong code until we discard it.  */
  if (codegen_error_p ())
    ub = integer_zero_node;

  edge last_e = single_succ_edge (split_edge (next_e));

  /* Compensate for the fact that we emit a do { } while loop from
     a for isl AST.
     ??? We often miss constraints on niter because the SESE region
     doesn't cover loop header copies.  Ideally we'd add constraints
     for all relevant dominating conditions.  */
  if (TREE_CODE (lb) == INTEGER_CST && TREE_CODE (ub) == INTEGER_CST
      && tree_int_cst_compare (lb, ub) <= 0)
    ;
  else
    {
      tree one = build_one_cst (POINTER_TYPE_P (type) ? sizetype : type);
      /* Adding +1 and using LT_EXPR helps with loop latches that have a
         loop iteration count of "PARAMETER - 1".  For PARAMETER == 0 this
         becomes 2^k-1 due to integer overflow, and the condition lb <= ub
         is true, even if we do not want this.  However lb < ub + 1 is false,
         as expected.  */
      tree ub_one = fold_build2 (POINTER_TYPE_P (type)
                                 ? POINTER_PLUS_EXPR : PLUS_EXPR,
                                 type, unshare_expr (ub), one);
      create_empty_if_region_on_edge (next_e,
                                      fold_build2 (LT_EXPR, boolean_type_node,
                                                   unshare_expr (lb), ub_one));
      next_e = get_true_edge_from_guard_bb (next_e->dest);
    }

  translate_isl_ast_for_loop (context_loop, node, next_e,
                              type, lb, ub, ip);
  return last_e;
}
/* Inserts in iv_map a tuple (OLD_LOOP->num, NEW_NAME) for the induction
   variables of the loops around GBB in SESE.

   FIXME: Instead of using a vec<tree> that maps each loop id to a possible
   chrec, we could consider using a map<int, tree> that maps loop ids to the
   corresponding tree expressions.  */

void translate_isl_ast_to_gimple::
build_iv_mapping (vec<tree> iv_map, gimple_poly_bb_p gbb,
                  __isl_keep isl_ast_expr *user_expr, ivs_params &ip,
                  sese_l &region)
{
  gcc_assert (isl_ast_expr_get_type (user_expr) == isl_ast_expr_op &&
              isl_ast_expr_get_op_type (user_expr) == isl_ast_op_call);
  int i;
  isl_ast_expr *arg_expr;
  for (i = 1; i < isl_ast_expr_get_op_n_arg (user_expr); i++)
    {
      arg_expr = isl_ast_expr_get_op_arg (user_expr, i);
      tree type = graphite_expr_type;
      tree t = gcc_expression_from_isl_expression (type, arg_expr, ip);

      /* To fail code generation, we generate wrong code until we discard it.  */
      if (codegen_error_p ())
        t = integer_zero_node;

      loop_p old_loop = gbb_loop_at_index (gbb, region, i - 1);
      iv_map[old_loop->num] = t;
    }
}
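/* The entries recorded above are consumed by get_rename_from_scev, where
   chrec_apply_map substitutes the expression stored at iv_map[loop->num]
   into the scalar evolution of each name defined in that loop.  */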
/* Translates an isl_ast_node_user to Gimple.

   FIXME: We should remove iv_map.create (loop->num + 1), if it is possible.  */

edge translate_isl_ast_to_gimple::
translate_isl_ast_node_user (__isl_keep isl_ast_node *node,
                             edge next_e, ivs_params &ip)
{
  gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_user);

  isl_ast_expr *user_expr = isl_ast_node_user_get_expr (node);
  isl_ast_expr *name_expr = isl_ast_expr_get_op_arg (user_expr, 0);
  gcc_assert (isl_ast_expr_get_type (name_expr) == isl_ast_expr_id);

  isl_id *name_id = isl_ast_expr_get_id (name_expr);
  poly_bb_p pbb = (poly_bb_p) isl_id_get_user (name_id);
  gcc_assert (pbb);

  gimple_poly_bb_p gbb = PBB_BLACK_BOX (pbb);

  isl_ast_expr_free (name_expr);
  isl_id_free (name_id);

  gcc_assert (GBB_BB (gbb) != ENTRY_BLOCK_PTR_FOR_FN (cfun) &&
              "The entry block should not even appear within a scop");

  const int nb_loops = number_of_loops (cfun);
  vec<tree> iv_map;
  iv_map.create (nb_loops);
  iv_map.safe_grow_cleared (nb_loops);

  build_iv_mapping (iv_map, gbb, user_expr, ip, pbb->scop->scop_info->region);
  isl_ast_expr_free (user_expr);

  basic_block old_bb = GBB_BB (gbb);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
               "[codegen] copying from bb_%d on edge (bb_%d, bb_%d)\n",
               old_bb->index, next_e->src->index, next_e->dest->index);
      print_loops_bb (dump_file, GBB_BB (gbb), 0, 3);
    }

  next_e = copy_bb_and_scalar_dependences (old_bb, next_e, iv_map);

  iv_map.release ();

  if (codegen_error_p ())
    return NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "[codegen] (after copy) new basic block\n");
      print_loops_bb (dump_file, next_e->src, 0, 3);
    }

  return next_e;
}
/* Translates an isl_ast_node_block to Gimple.  */

edge translate_isl_ast_to_gimple::
translate_isl_ast_node_block (loop_p context_loop,
                              __isl_keep isl_ast_node *node,
                              edge next_e, ivs_params &ip)
{
  gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_block);
  isl_ast_node_list *node_list = isl_ast_node_block_get_children (node);
  int i;
  for (i = 0; i < isl_ast_node_list_n_ast_node (node_list); i++)
    {
      isl_ast_node *tmp_node = isl_ast_node_list_get_ast_node (node_list, i);
      next_e = translate_isl_ast (context_loop, tmp_node, next_e, ip);
      isl_ast_node_free (tmp_node);
    }
  isl_ast_node_list_free (node_list);
  return next_e;
}
/* Creates a new if region corresponding to isl's cond.  */

edge translate_isl_ast_to_gimple::
graphite_create_new_guard (edge entry_edge, __isl_take isl_ast_expr *if_cond,
                           ivs_params &ip)
{
  tree type = graphite_expr_type;
  tree cond_expr = gcc_expression_from_isl_expression (type, if_cond, ip);

  /* To fail code generation, we generate wrong code until we discard it.  */
  if (codegen_error_p ())
    cond_expr = integer_zero_node;

  edge exit_edge = create_empty_if_region_on_edge (entry_edge, cond_expr);
  return exit_edge;
}
/* Translates an isl_ast_node_if to Gimple.  */

edge translate_isl_ast_to_gimple::
translate_isl_ast_node_if (loop_p context_loop,
                           __isl_keep isl_ast_node *node,
                           edge next_e, ivs_params &ip)
{
  gcc_assert (isl_ast_node_get_type (node) == isl_ast_node_if);
  isl_ast_expr *if_cond = isl_ast_node_if_get_cond (node);
  edge last_e = graphite_create_new_guard (next_e, if_cond, ip);
  edge true_e = get_true_edge_from_guard_bb (next_e->dest);
  merge_points.safe_push (last_e);

  isl_ast_node *then_node = isl_ast_node_if_get_then (node);
  translate_isl_ast (context_loop, then_node, true_e, ip);
  isl_ast_node_free (then_node);

  edge false_e = get_false_edge_from_guard_bb (next_e->dest);
  isl_ast_node *else_node = isl_ast_node_if_get_else (node);
  if (isl_ast_node_get_type (else_node) != isl_ast_node_error)
    translate_isl_ast (context_loop, else_node, false_e, ip);

  isl_ast_node_free (else_node);
  return last_e;
}
/* Translates an isl AST node NODE to GCC representation in the
   context of a SESE.  */

edge translate_isl_ast_to_gimple::
translate_isl_ast (loop_p context_loop, __isl_keep isl_ast_node *node,
                   edge next_e, ivs_params &ip)
{
  if (codegen_error_p ())
    return NULL;

  switch (isl_ast_node_get_type (node))
    {
    case isl_ast_node_error:
      gcc_unreachable ();

    case isl_ast_node_for:
      return translate_isl_ast_node_for (context_loop, node,
                                         next_e, ip);

    case isl_ast_node_if:
      return translate_isl_ast_node_if (context_loop, node,
                                        next_e, ip);

    case isl_ast_node_user:
      return translate_isl_ast_node_user (node, next_e, ip);

    case isl_ast_node_block:
      return translate_isl_ast_node_block (context_loop, node,
                                           next_e, ip);

    case isl_ast_node_mark:
      {
        isl_ast_node *n = isl_ast_node_mark_get_node (node);
        edge e = translate_isl_ast (context_loop, n, next_e, ip);
        isl_ast_node_free (n);
        return e;
      }

    default:
      gcc_unreachable ();
    }
}
/* Register in RENAME_MAP the rename tuple (OLD_NAME, EXPR).
   When OLD_NAME and EXPR are the same we assert.  */

void translate_isl_ast_to_gimple::
set_rename (tree old_name, tree expr)
{
  if (dump_file)
    {
      fprintf (dump_file, "[codegen] setting rename: old_name = ");
      print_generic_expr (dump_file, old_name);
      fprintf (dump_file, ", new decl = ");
      print_generic_expr (dump_file, expr);
      fprintf (dump_file, "\n");
    }

  bool res = region->rename_map->put (old_name, expr);
  gcc_assert (! res);
}
/* Return an iterator to the instruction that comes last in the execution
   order.  Either GSI1 and GSI2 should belong to the same basic block or
   one of their respective basic blocks should dominate the other.  */

static gimple_stmt_iterator
later_of_the_two (gimple_stmt_iterator gsi1, gimple_stmt_iterator gsi2)
{
  basic_block bb1 = gsi_bb (gsi1);
  basic_block bb2 = gsi_bb (gsi2);

  /* Find the iterator which is the latest.  */
  if (bb1 == bb2)
    {
      gimple *stmt1 = gsi_stmt (gsi1);
      gimple *stmt2 = gsi_stmt (gsi2);

      if (stmt1 != NULL && stmt2 != NULL)
        {
          bool is_phi1 = gimple_code (stmt1) == GIMPLE_PHI;
          bool is_phi2 = gimple_code (stmt2) == GIMPLE_PHI;

          if (is_phi1 != is_phi2)
            return is_phi1 ? gsi2 : gsi1;
        }

      /* For empty basic blocks gsis point to the end of the sequence.  Since
         there is no operator== defined for gimple_stmt_iterator and for gsis
         not pointing to a valid statement gsi_next would assert.  */
      gimple_stmt_iterator gsi = gsi1;
      do {
        if (gsi_stmt (gsi) == gsi_stmt (gsi2))
          return gsi2;
        gsi_next (&gsi);
      } while (!gsi_end_p (gsi));

      return gsi1;
    }

  /* Find the basic block closest to the basic block which defines stmt.  */
  if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
    return gsi1;

  gcc_assert (dominated_by_p (CDI_DOMINATORS, bb2, bb1));
  return gsi2;
}
/* Insert each statement from SEQ at its earliest insertion point.  */
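/* The earliest insertion point of a statement is computed as the latest
   definition point among the SSA names it uses; uses whose definitions lie
   outside the code-generation region fall back to the entry block of the
   generated region.  */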
void translate_isl_ast_to_gimple::
gsi_insert_earliest (gimple_seq seq)
{
  update_modified_stmts (seq);
  sese_l &codegen_region = region->if_region->true_region->region;
  basic_block begin_bb = get_entry_bb (codegen_region);

  /* Inserting the gimple statements in a vector because gimple_seq behaves
     in strange ways when inserting the stmts from it into different basic
     blocks one at a time.  */
  auto_vec<gimple *, 3> stmts;
  for (gimple_stmt_iterator gsi = gsi_start (seq); !gsi_end_p (gsi);
       gsi_next (&gsi))
    stmts.safe_push (gsi_stmt (gsi));

  int i;
  gimple *use_stmt;
  FOR_EACH_VEC_ELT (stmts, i, use_stmt)
    {
      gcc_assert (gimple_code (use_stmt) != GIMPLE_PHI);
      gimple_stmt_iterator gsi_def_stmt = gsi_start_nondebug_bb (begin_bb);

      use_operand_p use_p;
      ssa_op_iter op_iter;
      FOR_EACH_SSA_USE_OPERAND (use_p, use_stmt, op_iter, SSA_OP_USE)
        {
          /* Iterator to the current def of use_p.  For function parameters or
             anything where def is not found, insert at the beginning of the
             generated region.  */
          gimple_stmt_iterator gsi_stmt = gsi_def_stmt;

          tree op = USE_FROM_PTR (use_p);
          gimple *stmt = SSA_NAME_DEF_STMT (op);
          if (stmt && (gimple_code (stmt) != GIMPLE_NOP))
            gsi_stmt = gsi_for_stmt (stmt);

          /* For region parameters, insert at the beginning of the generated
             region.  */
          if (!bb_in_sese_p (gsi_bb (gsi_stmt), codegen_region))
            gsi_stmt = gsi_def_stmt;

          gsi_def_stmt = later_of_the_two (gsi_stmt, gsi_def_stmt);
        }

      if (!gsi_stmt (gsi_def_stmt))
        {
          gimple_stmt_iterator gsi = gsi_after_labels (gsi_bb (gsi_def_stmt));
          gsi_insert_before (&gsi, use_stmt, GSI_NEW_STMT);
        }
      else if (gimple_code (gsi_stmt (gsi_def_stmt)) == GIMPLE_PHI)
        {
          gimple_stmt_iterator bsi
            = gsi_start_nondebug_bb (gsi_bb (gsi_def_stmt));
          /* Insert right after the PHI statements.  */
          gsi_insert_before (&bsi, use_stmt, GSI_NEW_STMT);
        }
      else
        gsi_insert_after (&gsi_def_stmt, use_stmt, GSI_NEW_STMT);

      if (dump_file)
        {
          fprintf (dump_file, "[codegen] inserting statement in BB %d: ",
                   gimple_bb (use_stmt)->index);
          print_gimple_stmt (dump_file, use_stmt, 0, TDF_VOPS | TDF_MEMSYMS);
        }
    }
}
/* For ops which are scev_analyzable, we can regenerate a new name from
   its scalar evolution around LOOP.  */
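/* For example, if OLD_NAME has the scalar evolution {0, +, 1}_2 in loop
   number 2 of the original code, chrec_apply_map substitutes iv_map[2], the
   expression isl generated for that loop, to build the replacement value.  */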
tree translate_isl_ast_to_gimple::
get_rename_from_scev (tree old_name, gimple_seq *stmts, loop_p loop,
                      vec<tree> iv_map)
{
  tree scev = cached_scalar_evolution_in_region (region->region,
                                                 loop, old_name);

  /* At this point we should know the exact scev for each
     scalar SSA_NAME used in the scop: all the other scalar
     SSA_NAMEs should have been translated out of SSA using
     arrays with one element.  */
  tree new_expr;
  if (chrec_contains_undetermined (scev))
    {
      set_codegen_error ();
      return build_zero_cst (TREE_TYPE (old_name));
    }

  new_expr = chrec_apply_map (scev, iv_map);

  /* The apply should produce an expression tree containing
     the uses of the new induction variables.  We should be
     able to use new_expr instead of the old_name in the newly
     generated loop nest.  */
  if (chrec_contains_undetermined (new_expr)
      || tree_contains_chrecs (new_expr, NULL))
    {
      set_codegen_error ();
      return build_zero_cst (TREE_TYPE (old_name));
    }

  /* Replace the old_name with the new_expr.  */
  return force_gimple_operand (unshare_expr (new_expr), stmts,
                               true, NULL_TREE);
}
/* Return true if STMT should be copied from region to the new code-generated
   region.  LABELs, CONDITIONS, induction-variables and region parameters need
   not be copied.  */

static bool
should_copy_to_new_region (gimple *stmt, sese_info_p region)
{
  /* Do not copy labels or conditions.  */
  if (gimple_code (stmt) == GIMPLE_LABEL
      || gimple_code (stmt) == GIMPLE_COND)
    return false;

  tree lhs;
  /* Do not copy induction variables.  */
  if (is_gimple_assign (stmt)
      && (lhs = gimple_assign_lhs (stmt))
      && TREE_CODE (lhs) == SSA_NAME
      && scev_analyzable_p (lhs, region->region)
      /* But to code-generate liveouts - liveout PHI generation is
         in generic sese.c code that cannot do code generation.  */
      && ! bitmap_bit_p (region->liveout, SSA_NAME_VERSION (lhs)))
    return false;

  return true;
}
/* Duplicates the statements of basic block BB into basic block NEW_BB
   and compute the new induction variables according to the IV_MAP.  */

void translate_isl_ast_to_gimple::
graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb,
                                vec<tree> iv_map)
{
  /* Iterator pointing to the place where new statement (s) will be inserted.  */
  gimple_stmt_iterator gsi_tgt = gsi_last_bb (new_bb);

  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (!should_copy_to_new_region (stmt, region))
        continue;

      /* Create a new copy of STMT and duplicate STMT's virtual
         operands.  */
      gimple *copy = gimple_copy (stmt);

      /* Rather than not copying debug stmts we reset them.
         ??? Where we can rewrite uses without inserting new
         stmts we could simply do that.  */
      if (is_gimple_debug (copy))
        {
          if (gimple_debug_bind_p (copy))
            gimple_debug_bind_reset_value (copy);
          else if (gimple_debug_source_bind_p (copy)
                   || gimple_debug_nonbind_marker_p (copy))
            ;
          else
            gcc_unreachable ();
        }

      maybe_duplicate_eh_stmt (copy, stmt);
      gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);

      /* Create new names for each def in the copied stmt.  */
      def_operand_p def_p;
      ssa_op_iter op_iter;
      FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_ALL_DEFS)
        {
          tree old_name = DEF_FROM_PTR (def_p);
          create_new_def_for (old_name, copy, def_p);
        }

      gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
      if (dump_file)
        {
          fprintf (dump_file, "[codegen] inserting statement: ");
          print_gimple_stmt (dump_file, copy, 0);
        }

      /* For each SCEV analyzable SSA_NAME, rename its uses.  */
      ssa_op_iter iter;
      use_operand_p use_p;
      if (!is_gimple_debug (copy))
        {
          bool changed = false;
          FOR_EACH_SSA_USE_OPERAND (use_p, copy, iter, SSA_OP_USE)
            {
              tree old_name = USE_FROM_PTR (use_p);

              if (TREE_CODE (old_name) != SSA_NAME
                  || SSA_NAME_IS_DEFAULT_DEF (old_name)
                  || ! scev_analyzable_p (old_name, region->region))
                continue;

              gimple_seq stmts = NULL;
              tree new_name = get_rename_from_scev (old_name, &stmts,
                                                    bb->loop_father, iv_map);
              if (! codegen_error_p ())
                gsi_insert_earliest (stmts);
              replace_exp (use_p, new_name);
              changed = true;
            }
          if (changed)
            fold_stmt_inplace (&gsi_tgt);
        }

      update_stmt (copy);
    }
}
/* Copies BB and includes in the copied BB all the statements that can
   be reached following the use-def chains from the memory accesses,
   and returns the next edge following this new block.  */
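/* Cross-BB scalar dependences are handled by turning each non-virtual,
   non-scev-analyzable PHI result into an out-of-SSA temporary registered in
   rename_map, and by materializing assignments to that temporary for the
   PHI arguments on the outgoing edges of the original block.  */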
edge translate_isl_ast_to_gimple::
copy_bb_and_scalar_dependences (basic_block bb, edge next_e, vec<tree> iv_map)
{
  basic_block new_bb = split_edge (next_e);
  gimple_stmt_iterator gsi_tgt = gsi_last_bb (new_bb);
  for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi);
       gsi_next (&psi))
    {
      gphi *phi = psi.phi ();
      tree res = gimple_phi_result (phi);
      if (virtual_operand_p (res)
          || scev_analyzable_p (res, region->region))
        continue;

      tree new_phi_def;
      tree *rename = region->rename_map->get (res);
      if (!rename)
        {
          new_phi_def = create_tmp_reg (TREE_TYPE (res));
          set_rename (res, new_phi_def);
        }
      else
        new_phi_def = *rename;

      gassign *ass = gimple_build_assign (NULL_TREE, new_phi_def);
      create_new_def_for (res, ass, NULL);
      gsi_insert_after (&gsi_tgt, ass, GSI_NEW_STMT);
    }

  graphite_copy_stmts_from_block (bb, new_bb, iv_map);

  /* Insert out-of SSA copies on the original BB outgoing edges.  */
  gsi_tgt = gsi_last_bb (new_bb);
  basic_block bb_for_succs = bb;
  if (bb_for_succs == bb_for_succs->loop_father->latch
      && bb_in_sese_p (bb_for_succs, region->region)
      && sese_trivially_empty_bb_p (bb_for_succs))
    bb_for_succs = NULL;
  while (bb_for_succs)
    {
      basic_block latch = NULL;
      edge_iterator ei;
      edge e;
      FOR_EACH_EDGE (e, ei, bb_for_succs->succs)
        {
          for (gphi_iterator psi = gsi_start_phis (e->dest); !gsi_end_p (psi);
               gsi_next (&psi))
            {
              gphi *phi = psi.phi ();
              tree res = gimple_phi_result (phi);
              if (virtual_operand_p (res)
                  || scev_analyzable_p (res, region->region))
                continue;

              tree new_phi_def;
              tree *rename = region->rename_map->get (res);
              if (!rename)
                {
                  new_phi_def = create_tmp_reg (TREE_TYPE (res));
                  set_rename (res, new_phi_def);
                }
              else
                new_phi_def = *rename;

              tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e);
              if (TREE_CODE (arg) == SSA_NAME
                  && scev_analyzable_p (arg, region->region))
                {
                  gimple_seq stmts = NULL;
                  tree new_name = get_rename_from_scev (arg, &stmts,
                                                        bb->loop_father,
                                                        iv_map);
                  if (! codegen_error_p ())
                    gsi_insert_earliest (stmts);
                  arg = new_name;
                }
              gassign *ass = gimple_build_assign (new_phi_def, arg);
              gsi_insert_after (&gsi_tgt, ass, GSI_NEW_STMT);
            }
          if (e->dest == bb_for_succs->loop_father->latch
              && bb_in_sese_p (e->dest, region->region)
              && sese_trivially_empty_bb_p (e->dest))
            latch = e->dest;
        }
      bb_for_succs = latch;
    }

  return single_succ_edge (new_bb);
}
/* Add isl's parameter identifiers and corresponding trees to ivs_params.  */

void translate_isl_ast_to_gimple::
add_parameters_to_ivs_params (scop_p scop, ivs_params &ip)
{
  sese_info_p region = scop->scop_info;
  unsigned nb_parameters = isl_set_dim (scop->param_context, isl_dim_param);
  gcc_assert (nb_parameters == sese_nb_params (region));
  unsigned i;
  tree param;
  FOR_EACH_VEC_ELT (region->params, i, param)
    {
      isl_id *tmp_id = isl_set_get_dim_id (scop->param_context,
                                           isl_dim_param, i);
      ip[tmp_id] = param;
    }
}

/* Generates a build, which specifies the constraints on the parameters.  */

__isl_give isl_ast_build *translate_isl_ast_to_gimple::
generate_isl_context (scop_p scop)
{
  isl_set *context_isl = isl_set_params (isl_set_copy (scop->param_context));
  return isl_ast_build_from_context (context_isl);
}
/* This method is executed before the construction of a for node.  */

static __isl_give isl_id *
ast_build_before_for (__isl_keep isl_ast_build *build, void *user)
{
  isl_union_map *dependences = (isl_union_map *) user;
  ast_build_info *for_info = XNEW (struct ast_build_info);
  isl_union_map *schedule = isl_ast_build_get_schedule (build);
  isl_space *schedule_space = isl_ast_build_get_schedule_space (build);
  int dimension = isl_space_dim (schedule_space, isl_dim_out);
  for_info->is_parallelizable =
    !carries_deps (schedule, dependences, dimension);
  isl_union_map_free (schedule);
  isl_space_free (schedule_space);
  isl_id *id = isl_id_alloc (isl_ast_build_get_ctx (build), "", for_info);
  return id;
}
/* Generate isl AST from schedule of SCOP.  */

__isl_give isl_ast_node *translate_isl_ast_to_gimple::
scop_to_isl_ast (scop_p scop)
{
  int old_err = isl_options_get_on_error (scop->isl_context);
  int old_max_operations = isl_ctx_get_max_operations (scop->isl_context);
  int max_operations = PARAM_VALUE (PARAM_MAX_ISL_OPERATIONS);
  if (max_operations)
    isl_ctx_set_max_operations (scop->isl_context, max_operations);
  isl_options_set_on_error (scop->isl_context, ISL_ON_ERROR_CONTINUE);

  gcc_assert (scop->transformed_schedule);

  /* Set the separate option to reduce control flow overhead.  */
  isl_schedule *schedule = isl_schedule_map_schedule_node_bottom_up
    (isl_schedule_copy (scop->transformed_schedule), set_separate_option, NULL);
  isl_ast_build *context_isl = generate_isl_context (scop);

  if (flag_loop_parallelize_all)
    {
      scop_get_dependences (scop);
      context_isl =
        isl_ast_build_set_before_each_for (context_isl, ast_build_before_for,
                                           scop->dependence);
    }

  isl_ast_node *ast_isl = isl_ast_build_node_from_schedule
    (context_isl, schedule);
  isl_ast_build_free (context_isl);

  isl_options_set_on_error (scop->isl_context, old_err);
  isl_ctx_reset_operations (scop->isl_context);
  isl_ctx_set_max_operations (scop->isl_context, old_max_operations);
  if (isl_ctx_last_error (scop->isl_context) != isl_error_none)
    {
      if (dump_enabled_p ())
        {
          dump_user_location_t loc = find_loop_location
            (scop->scop_info->region.entry->dest->loop_father);
          if (isl_ctx_last_error (scop->isl_context) == isl_error_quota)
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, loc,
                             "loop nest not optimized, AST generation timed out "
                             "after %d operations [--param max-isl-operations]\n",
                             max_operations);
          else
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, loc,
                             "loop nest not optimized, ISL AST generation "
                             "signalled an error\n");
        }
      isl_ast_node_free (ast_isl);
      return NULL;
    }

  return ast_isl;
}
/* Generate out-of-SSA copies for the entry edge FALSE_ENTRY/TRUE_ENTRY
   in REGION.  */

static void
generate_entry_out_of_ssa_copies (edge false_entry,
                                  edge true_entry,
                                  sese_info_p region)
{
  gimple_stmt_iterator gsi_tgt = gsi_start_bb (true_entry->dest);
  for (gphi_iterator psi = gsi_start_phis (false_entry->dest);
       !gsi_end_p (psi); gsi_next (&psi))
    {
      gphi *phi = psi.phi ();
      tree res = gimple_phi_result (phi);
      if (virtual_operand_p (res))
        continue;
      /* When there's no out-of-SSA var registered do not bother
         to create one.  */
      tree *rename = region->rename_map->get (res);
      if (! rename)
        continue;
      tree new_phi_def = *rename;
      gassign *ass = gimple_build_assign (new_phi_def,
                                          PHI_ARG_DEF_FROM_EDGE (phi,
                                                                 false_entry));
      gsi_insert_after (&gsi_tgt, ass, GSI_NEW_STMT);
    }
}
/* GIMPLE Loop Generator: generates loops in GIMPLE form for the given SCOP.
   Return true if code generation succeeded.  */

bool
graphite_regenerate_ast_isl (scop_p scop)
{
  sese_info_p region = scop->scop_info;
  translate_isl_ast_to_gimple t (region);

  ifsese if_region = NULL;
  isl_ast_node *root_node;
  ivs_params ip;

  timevar_push (TV_GRAPHITE_CODE_GEN);
  t.add_parameters_to_ivs_params (scop, ip);
  root_node = t.scop_to_isl_ast (scop);
  if (! root_node)
    {
      ivs_params_clear (ip);
      timevar_pop (TV_GRAPHITE_CODE_GEN);
      return false;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "[scheduler] original schedule:\n");
      print_isl_schedule (dump_file, scop->original_schedule);
      fprintf (dump_file, "[scheduler] isl transformed schedule:\n");
      print_isl_schedule (dump_file, scop->transformed_schedule);

      fprintf (dump_file, "[scheduler] original ast:\n");
      print_schedule_ast (dump_file, scop->original_schedule, scop);
      fprintf (dump_file, "[scheduler] AST generated by isl:\n");
      print_isl_ast (dump_file, root_node);
    }

  if_region = move_sese_in_condition (region);
  region->if_region = if_region;

  loop_p context_loop = region->region.entry->src->loop_father;
  edge e = single_succ_edge (if_region->true_region->region.entry->dest);
  basic_block bb = split_edge (e);

  /* Update the true_region exit edge.  */
  region->if_region->true_region->region.exit = single_succ_edge (bb);

  t.translate_isl_ast (context_loop, root_node, e, ip);
  if (! t.codegen_error_p ())
    {
      generate_entry_out_of_ssa_copies (if_region->false_region->region.entry,
                                        if_region->true_region->region.entry,
                                        region);
      sese_insert_phis_for_liveouts (region,
                                     if_region->region->region.exit->src,
                                     if_region->false_region->region.exit,
                                     if_region->true_region->region.exit);
      if (dump_file)
        fprintf (dump_file, "[codegen] isl AST to Gimple succeeded.\n");
    }

  if (t.codegen_error_p ())
    {
      if (dump_enabled_p ())
        {
          dump_user_location_t loc = find_loop_location
            (scop->scop_info->region.entry->dest->loop_father);
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, loc,
                           "loop nest not optimized, code generation error\n");
        }

      /* Remove the unreachable region.  */
      remove_edge_and_dominated_blocks (if_region->true_region->region.entry);
      basic_block ifb = if_region->false_region->region.entry->src;
      gimple_stmt_iterator gsi = gsi_last_bb (ifb);
      gsi_remove (&gsi, true);
      if_region->false_region->region.entry->flags &= ~EDGE_FALSE_VALUE;
      if_region->false_region->region.entry->flags |= EDGE_FALLTHRU;
      /* remove_edge_and_dominated_blocks marks loops for removal but
         doesn't actually remove them (fix that...).  */
      loop_p loop;
      FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
        if (! loop->header)
          delete_loop (loop);
    }

  /* We are delaying SSA update to after code-generating all SCOPs.
     This is because we analyzed DRs and parameters on the unmodified
     IL and thus rely on SSA update to pick up new dominating definitions
     from for example SESE liveout PHIs.  This is also for efficiency
     as SSA update does work depending on the size of the function.  */

  free (if_region->true_region);
  free (if_region->region);
  free (if_region);

  ivs_params_clear (ip);
  isl_ast_node_free (root_node);
  timevar_pop (TV_GRAPHITE_CODE_GEN);

  return !t.codegen_error_p ();
}

#endif  /* HAVE_isl */