1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2013 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Inlining decision heuristics
23 The implementation of inliner is organized as follows:
25 inlining heuristics limits
27 can_inline_edge_p allow to check that particular inlining is allowed
28 by the limits specified by user (allowed function growth, growth and so
31 Functions are inlined when it is obvious the result is profitable (such
32 as functions called once or when inlining reduce code size).
33 In addition to that we perform inlining of small functions and recursive
38 The inliner itself is split into two passes:
42 Simple local inlining pass inlining callees into current function.
43 This pass makes no use of whole unit analysis and thus it can do only
44 very simple decisions based on local properties.
46 The strength of the pass is that it is run in topological order
47 (reverse postorder) on the callgraph. Functions are converted into SSA
48 form just before this pass and optimized subsequently. As a result, the
49 callees of the function seen by the early inliner was already optimized
50 and results of early inlining adds a lot of optimization opportunities
51 for the local optimization.
53 The pass handle the obvious inlining decisions within the compilation
54 unit - inlining auto inline functions, inlining for size and
57 main strength of the pass is the ability to eliminate abstraction
58 penalty in C++ code (via combination of inlining and early
59 optimization) and thus improve quality of analysis done by real IPA
62 Because of lack of whole unit knowledge, the pass can not really make
63 good code size/performance tradeoffs. It however does very simple
64 speculative inlining allowing code size to grow by
65 EARLY_INLINING_INSNS when callee is leaf function. In this case the
66 optimizations performed later are very likely to eliminate the cost.
70 This is the real inliner able to handle inlining with whole program
71 knowledge. It performs following steps:
73 1) inlining of small functions. This is implemented by greedy
74 algorithm ordering all inlinable cgraph edges by their badness and
75 inlining them in this order as long as inline limits allows doing so.
77 This heuristics is not very good on inlining recursive calls. Recursive
78 calls can be inlined with results similar to loop unrolling. To do so,
79 special purpose recursive inliner is executed on function when
80 recursive edge is met as viable candidate.
82 2) Unreachable functions are removed from callgraph. Inlining leads
83 to devirtualization and other modification of callgraph so functions
84 may become unreachable during the process. Also functions declared as
85 extern inline or virtual functions are removed, since after inlining
86 we no longer need the offline bodies.
88 3) Functions called once and not exported from the unit are inlined.
89 This should almost always lead to reduction of code size by eliminating
90 the need for offline copy of the function. */
94 #include "coretypes.h"
97 #include "tree-inline.h"
98 #include "langhooks.h"
101 #include "diagnostic.h"
102 #include "gimple-pretty-print.h"
106 #include "tree-pass.h"
107 #include "coverage.h"
110 #include "tree-flow.h"
111 #include "ipa-prop.h"
114 #include "ipa-inline.h"
115 #include "ipa-utils.h"
117 /* Statistics we collect about inlining algorithm. */
118 static int overall_size
;
119 static gcov_type max_count
;
121 /* Return false when inlining edge E would lead to violating
122 limits on function unit growth or stack usage growth.
124 The relative function body growth limit is present generally
125 to avoid problems with non-linear behavior of the compiler.
126 To allow inlining huge functions into tiny wrapper, the limit
127 is always based on the bigger of the two functions considered.
129 For stack growth limits we always base the growth in stack usage
130 of the callers. We want to prevent applications from segfaulting
131 on stack overflow when functions with huge stack frames gets
135 caller_growth_limits (struct cgraph_edge
*e
)
137 struct cgraph_node
*to
= e
->caller
;
138 struct cgraph_node
*what
= cgraph_function_or_thunk_node (e
->callee
, NULL
);
141 HOST_WIDE_INT stack_size_limit
= 0, inlined_stack
;
142 struct inline_summary
*info
, *what_info
, *outer_info
= inline_summary (to
);
144 /* Look for function e->caller is inlined to. While doing
145 so work out the largest function body on the way. As
146 described above, we want to base our function growth
147 limits based on that. Not on the self size of the
148 outer function, not on the self size of inline code
149 we immediately inline to. This is the most relaxed
150 interpretation of the rule "do not grow large functions
151 too much in order to prevent compiler from exploding". */
154 info
= inline_summary (to
);
155 if (limit
< info
->self_size
)
156 limit
= info
->self_size
;
157 if (stack_size_limit
< info
->estimated_self_stack_size
)
158 stack_size_limit
= info
->estimated_self_stack_size
;
159 if (to
->global
.inlined_to
)
160 to
= to
->callers
->caller
;
165 what_info
= inline_summary (what
);
167 if (limit
< what_info
->self_size
)
168 limit
= what_info
->self_size
;
170 limit
+= limit
* PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH
) / 100;
172 /* Check the size after inlining against the function limits. But allow
173 the function to shrink if it went over the limits by forced inlining. */
174 newsize
= estimate_size_after_inlining (to
, e
);
175 if (newsize
>= info
->size
176 && newsize
> PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS
)
179 e
->inline_failed
= CIF_LARGE_FUNCTION_GROWTH_LIMIT
;
183 if (!what_info
->estimated_stack_size
)
186 /* FIXME: Stack size limit often prevents inlining in Fortran programs
187 due to large i/o datastructures used by the Fortran front-end.
188 We ought to ignore this limit when we know that the edge is executed
189 on every invocation of the caller (i.e. its call statement dominates
190 exit block). We do not track this information, yet. */
191 stack_size_limit
+= ((gcov_type
)stack_size_limit
192 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH
) / 100);
194 inlined_stack
= (outer_info
->stack_frame_offset
195 + outer_info
->estimated_self_stack_size
196 + what_info
->estimated_stack_size
);
197 /* Check new stack consumption with stack consumption at the place
199 if (inlined_stack
> stack_size_limit
200 /* If function already has large stack usage from sibling
201 inline call, we can inline, too.
202 This bit overoptimistically assume that we are good at stack
204 && inlined_stack
> info
->estimated_stack_size
205 && inlined_stack
> PARAM_VALUE (PARAM_LARGE_STACK_FRAME
))
207 e
->inline_failed
= CIF_LARGE_STACK_FRAME_GROWTH_LIMIT
;
213 /* Dump info about why inlining has failed. */
216 report_inline_failed_reason (struct cgraph_edge
*e
)
220 fprintf (dump_file
, " not inlinable: %s/%i -> %s/%i, %s\n",
221 xstrdup (cgraph_node_name (e
->caller
)), e
->caller
->symbol
.order
,
222 xstrdup (cgraph_node_name (e
->callee
)), e
->callee
->symbol
.order
,
223 cgraph_inline_failed_string (e
->inline_failed
));
227 /* Decide if we can inline the edge and possibly update
228 inline_failed reason.
229 We check whether inlining is possible at all and whether
230 caller growth limits allow doing so.
232 if REPORT is true, output reason to the dump file.
234 if DISREGARD_LIMITES is true, ignore size limits.*/
237 can_inline_edge_p (struct cgraph_edge
*e
, bool report
,
238 bool disregard_limits
= false)
240 bool inlinable
= true;
241 enum availability avail
;
242 struct cgraph_node
*callee
243 = cgraph_function_or_thunk_node (e
->callee
, &avail
);
244 tree caller_tree
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e
->caller
->symbol
.decl
);
246 = callee
? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee
->symbol
.decl
) : NULL
;
247 struct function
*caller_cfun
= DECL_STRUCT_FUNCTION (e
->caller
->symbol
.decl
);
248 struct function
*callee_cfun
249 = callee
? DECL_STRUCT_FUNCTION (callee
->symbol
.decl
) : NULL
;
251 if (!caller_cfun
&& e
->caller
->clone_of
)
252 caller_cfun
= DECL_STRUCT_FUNCTION (e
->caller
->clone_of
->symbol
.decl
);
254 if (!callee_cfun
&& callee
&& callee
->clone_of
)
255 callee_cfun
= DECL_STRUCT_FUNCTION (callee
->clone_of
->symbol
.decl
);
257 gcc_assert (e
->inline_failed
);
259 if (!callee
|| !callee
->symbol
.definition
)
261 e
->inline_failed
= CIF_BODY_NOT_AVAILABLE
;
264 else if (!inline_summary (callee
)->inlinable
)
266 e
->inline_failed
= CIF_FUNCTION_NOT_INLINABLE
;
269 else if (avail
<= AVAIL_OVERWRITABLE
)
271 e
->inline_failed
= CIF_OVERWRITABLE
;
274 else if (e
->call_stmt_cannot_inline_p
)
276 e
->inline_failed
= CIF_MISMATCHED_ARGUMENTS
;
279 /* Don't inline if the functions have different EH personalities. */
280 else if (DECL_FUNCTION_PERSONALITY (e
->caller
->symbol
.decl
)
281 && DECL_FUNCTION_PERSONALITY (callee
->symbol
.decl
)
282 && (DECL_FUNCTION_PERSONALITY (e
->caller
->symbol
.decl
)
283 != DECL_FUNCTION_PERSONALITY (callee
->symbol
.decl
)))
285 e
->inline_failed
= CIF_EH_PERSONALITY
;
288 /* TM pure functions should not be inlined into non-TM_pure
290 else if (is_tm_pure (callee
->symbol
.decl
)
291 && !is_tm_pure (e
->caller
->symbol
.decl
))
293 e
->inline_failed
= CIF_UNSPECIFIED
;
296 /* Don't inline if the callee can throw non-call exceptions but the
298 FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
299 Move the flag into cgraph node or mirror it in the inline summary. */
300 else if (callee_cfun
&& callee_cfun
->can_throw_non_call_exceptions
301 && !(caller_cfun
&& caller_cfun
->can_throw_non_call_exceptions
))
303 e
->inline_failed
= CIF_NON_CALL_EXCEPTIONS
;
306 /* Check compatibility of target optimization options. */
307 else if (!targetm
.target_option
.can_inline_p (e
->caller
->symbol
.decl
,
308 callee
->symbol
.decl
))
310 e
->inline_failed
= CIF_TARGET_OPTION_MISMATCH
;
313 /* Check if caller growth allows the inlining. */
314 else if (!DECL_DISREGARD_INLINE_LIMITS (callee
->symbol
.decl
)
316 && !lookup_attribute ("flatten",
318 (e
->caller
->global
.inlined_to
319 ? e
->caller
->global
.inlined_to
->symbol
.decl
320 : e
->caller
->symbol
.decl
))
321 && !caller_growth_limits (e
))
323 /* Don't inline a function with a higher optimization level than the
324 caller. FIXME: this is really just tip of iceberg of handling
325 optimization attribute. */
326 else if (caller_tree
!= callee_tree
)
328 struct cl_optimization
*caller_opt
329 = TREE_OPTIMIZATION ((caller_tree
)
331 : optimization_default_node
);
333 struct cl_optimization
*callee_opt
334 = TREE_OPTIMIZATION ((callee_tree
)
336 : optimization_default_node
);
338 if (((caller_opt
->x_optimize
> callee_opt
->x_optimize
)
339 || (caller_opt
->x_optimize_size
!= callee_opt
->x_optimize_size
))
340 /* gcc.dg/pr43564.c. Look at forced inline even in -O0. */
341 && !DECL_DISREGARD_INLINE_LIMITS (e
->callee
->symbol
.decl
))
343 e
->inline_failed
= CIF_OPTIMIZATION_MISMATCH
;
348 if (!inlinable
&& report
)
349 report_inline_failed_reason (e
);
354 /* Return true if the edge E is inlinable during early inlining. */
357 can_early_inline_edge_p (struct cgraph_edge
*e
)
359 struct cgraph_node
*callee
= cgraph_function_or_thunk_node (e
->callee
,
361 /* Early inliner might get called at WPA stage when IPA pass adds new
362 function. In this case we can not really do any of early inlining
363 because function bodies are missing. */
364 if (!gimple_has_body_p (callee
->symbol
.decl
))
366 e
->inline_failed
= CIF_BODY_NOT_AVAILABLE
;
369 /* In early inliner some of callees may not be in SSA form yet
370 (i.e. the callgraph is cyclic and we did not process
371 the callee by early inliner, yet). We don't have CIF code for this
372 case; later we will re-do the decision in the real inliner. */
373 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e
->caller
->symbol
.decl
))
374 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee
->symbol
.decl
)))
377 fprintf (dump_file
, " edge not inlinable: not in SSA form\n");
380 if (!can_inline_edge_p (e
, true))
386 /* Return number of calls in N. Ignore cheap builtins. */
389 num_calls (struct cgraph_node
*n
)
391 struct cgraph_edge
*e
;
394 for (e
= n
->callees
; e
; e
= e
->next_callee
)
395 if (!is_inexpensive_builtin (e
->callee
->symbol
.decl
))
401 /* Return true if we are interested in inlining small function. */
404 want_early_inline_function_p (struct cgraph_edge
*e
)
406 bool want_inline
= true;
407 struct cgraph_node
*callee
= cgraph_function_or_thunk_node (e
->callee
, NULL
);
409 if (DECL_DISREGARD_INLINE_LIMITS (callee
->symbol
.decl
))
411 else if (!DECL_DECLARED_INLINE_P (callee
->symbol
.decl
)
412 && !flag_inline_small_functions
)
414 e
->inline_failed
= CIF_FUNCTION_NOT_INLINE_CANDIDATE
;
415 report_inline_failed_reason (e
);
420 int growth
= estimate_edge_growth (e
);
425 else if (!cgraph_maybe_hot_edge_p (e
)
429 fprintf (dump_file
, " will not early inline: %s/%i->%s/%i, "
430 "call is cold and code would grow by %i\n",
431 xstrdup (cgraph_node_name (e
->caller
)),
432 e
->caller
->symbol
.order
,
433 xstrdup (cgraph_node_name (callee
)), callee
->symbol
.order
,
437 else if (growth
> PARAM_VALUE (PARAM_EARLY_INLINING_INSNS
))
440 fprintf (dump_file
, " will not early inline: %s/%i->%s/%i, "
441 "growth %i exceeds --param early-inlining-insns\n",
442 xstrdup (cgraph_node_name (e
->caller
)),
443 e
->caller
->symbol
.order
,
444 xstrdup (cgraph_node_name (callee
)), callee
->symbol
.order
,
448 else if ((n
= num_calls (callee
)) != 0
449 && growth
* (n
+ 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS
))
452 fprintf (dump_file
, " will not early inline: %s/%i->%s/%i, "
453 "growth %i exceeds --param early-inlining-insns "
454 "divided by number of calls\n",
455 xstrdup (cgraph_node_name (e
->caller
)),
456 e
->caller
->symbol
.order
,
457 xstrdup (cgraph_node_name (callee
)), callee
->symbol
.order
,
465 /* Compute time of the edge->caller + edge->callee execution when inlining
469 compute_uninlined_call_time (struct inline_summary
*callee_info
,
470 struct cgraph_edge
*edge
)
472 gcov_type uninlined_call_time
=
473 RDIV ((gcov_type
)callee_info
->time
* MAX (edge
->frequency
, 1),
475 gcov_type caller_time
= inline_summary (edge
->caller
->global
.inlined_to
476 ? edge
->caller
->global
.inlined_to
477 : edge
->caller
)->time
;
478 return uninlined_call_time
+ caller_time
;
481 /* Same as compute_uinlined_call_time but compute time when inlining
485 compute_inlined_call_time (struct cgraph_edge
*edge
,
488 gcov_type caller_time
= inline_summary (edge
->caller
->global
.inlined_to
489 ? edge
->caller
->global
.inlined_to
490 : edge
->caller
)->time
;
491 gcov_type time
= (caller_time
492 + RDIV (((gcov_type
) edge_time
493 - inline_edge_summary (edge
)->call_stmt_time
)
494 * MAX (edge
->frequency
, 1), CGRAPH_FREQ_BASE
));
495 /* Possible one roundoff error, but watch for overflows. */
496 gcc_checking_assert (time
>= INT_MIN
/ 2);
502 /* Return true if the speedup for inlining E is bigger than
503 PARAM_MAX_INLINE_MIN_SPEEDUP. */
506 big_speedup_p (struct cgraph_edge
*e
)
508 gcov_type time
= compute_uninlined_call_time (inline_summary (e
->callee
),
510 gcov_type inlined_time
= compute_inlined_call_time (e
,
511 estimate_edge_time (e
));
512 if (time
- inlined_time
513 > RDIV (time
* PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP
), 100))
518 /* Return true if we are interested in inlining small function.
519 When REPORT is true, report reason to dump file. */
522 want_inline_small_function_p (struct cgraph_edge
*e
, bool report
)
524 bool want_inline
= true;
525 struct cgraph_node
*callee
= cgraph_function_or_thunk_node (e
->callee
, NULL
);
527 if (DECL_DISREGARD_INLINE_LIMITS (callee
->symbol
.decl
))
529 else if (!DECL_DECLARED_INLINE_P (callee
->symbol
.decl
)
530 && !flag_inline_small_functions
)
532 e
->inline_failed
= CIF_FUNCTION_NOT_INLINE_CANDIDATE
;
537 int growth
= estimate_edge_growth (e
);
538 inline_hints hints
= estimate_edge_hints (e
);
539 bool big_speedup
= big_speedup_p (e
);
543 /* Apply MAX_INLINE_INSNS_SINGLE limit. Do not do so when
544 hints suggests that inlining given function is very profitable. */
545 else if (DECL_DECLARED_INLINE_P (callee
->symbol
.decl
)
546 && growth
>= MAX_INLINE_INSNS_SINGLE
548 && !(hints
& (INLINE_HINT_indirect_call
549 | INLINE_HINT_loop_iterations
550 | INLINE_HINT_array_index
551 | INLINE_HINT_loop_stride
)))
553 e
->inline_failed
= CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
;
556 /* Before giving up based on fact that caller size will grow, allow
557 functions that are called few times and eliminating the offline
558 copy will lead to overall code size reduction.
559 Not all of these will be handled by subsequent inlining of functions
560 called once: in particular weak functions are not handled or funcitons
561 that inline to multiple calls but a lot of bodies is optimized out.
562 Finally we want to inline earlier to allow inlining of callbacks.
564 This is slightly wrong on aggressive side: it is entirely possible
565 that function is called many times with a context where inlining
566 reduces code size and few times with a context where inlining increase
567 code size. Resoluting growth estimate will be negative even if it
568 would make more sense to keep offline copy and do not inline into the
569 call sites that makes the code size grow.
571 When badness orders the calls in a way that code reducing calls come
572 first, this situation is not a problem at all: after inlining all
573 "good" calls, we will realize that keeping the function around is
575 else if (growth
<= MAX_INLINE_INSNS_SINGLE
576 /* Unlike for functions called once, we play unsafe with
577 COMDATs. We can allow that since we know functions
578 in consideration are small (and thus risk is small) and
579 moreover grow estimates already accounts that COMDAT
580 functions may or may not disappear when eliminated from
581 current unit. With good probability making aggressive
582 choice in all units is going to make overall program
585 Consequently we ask cgraph_can_remove_if_no_direct_calls_p
587 cgraph_will_be_removed_from_program_if_no_direct_calls */
588 && !DECL_EXTERNAL (callee
->symbol
.decl
)
589 && cgraph_can_remove_if_no_direct_calls_p (callee
)
590 && estimate_growth (callee
) <= 0)
592 else if (!DECL_DECLARED_INLINE_P (callee
->symbol
.decl
)
593 && !flag_inline_functions
)
595 e
->inline_failed
= CIF_NOT_DECLARED_INLINED
;
598 /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline
599 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggests that
600 inlining given function is very profitable. */
601 else if (!DECL_DECLARED_INLINE_P (callee
->symbol
.decl
)
603 && growth
>= ((hints
& (INLINE_HINT_indirect_call
604 | INLINE_HINT_loop_iterations
605 | INLINE_HINT_array_index
606 | INLINE_HINT_loop_stride
))
607 ? MAX (MAX_INLINE_INSNS_AUTO
,
608 MAX_INLINE_INSNS_SINGLE
)
609 : MAX_INLINE_INSNS_AUTO
))
611 e
->inline_failed
= CIF_MAX_INLINE_INSNS_AUTO_LIMIT
;
614 /* If call is cold, do not inline when function body would grow. */
615 else if (!cgraph_maybe_hot_edge_p (e
))
617 e
->inline_failed
= CIF_UNLIKELY_CALL
;
621 if (!want_inline
&& report
)
622 report_inline_failed_reason (e
);
626 /* EDGE is self recursive edge.
627 We hand two cases - when function A is inlining into itself
628 or when function A is being inlined into another inliner copy of function
631 In first case OUTER_NODE points to the toplevel copy of A, while
632 in the second case OUTER_NODE points to the outermost copy of A in B.
634 In both cases we want to be extra selective since
635 inlining the call will just introduce new recursive calls to appear. */
638 want_inline_self_recursive_call_p (struct cgraph_edge
*edge
,
639 struct cgraph_node
*outer_node
,
643 char const *reason
= NULL
;
644 bool want_inline
= true;
645 int caller_freq
= CGRAPH_FREQ_BASE
;
646 int max_depth
= PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO
);
648 if (DECL_DECLARED_INLINE_P (edge
->caller
->symbol
.decl
))
649 max_depth
= PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH
);
651 if (!cgraph_maybe_hot_edge_p (edge
))
653 reason
= "recursive call is cold";
656 else if (max_count
&& !outer_node
->count
)
658 reason
= "not executed in profile";
661 else if (depth
> max_depth
)
663 reason
= "--param max-inline-recursive-depth exceeded.";
667 if (outer_node
->global
.inlined_to
)
668 caller_freq
= outer_node
->callers
->frequency
;
672 /* Inlining of self recursive function into copy of itself within other function
673 is transformation similar to loop peeling.
675 Peeling is profitable if we can inline enough copies to make probability
676 of actual call to the self recursive function very small. Be sure that
677 the probability of recursion is small.
679 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
680 This way the expected number of recision is at most max_depth. */
683 int max_prob
= CGRAPH_FREQ_BASE
- ((CGRAPH_FREQ_BASE
+ max_depth
- 1)
686 for (i
= 1; i
< depth
; i
++)
687 max_prob
= max_prob
* max_prob
/ CGRAPH_FREQ_BASE
;
689 && (edge
->count
* CGRAPH_FREQ_BASE
/ outer_node
->count
692 reason
= "profile of recursive call is too large";
696 && (edge
->frequency
* CGRAPH_FREQ_BASE
/ caller_freq
699 reason
= "frequency of recursive call is too large";
703 /* Recursive inlining, i.e. equivalent of unrolling, is profitable if recursion
704 depth is large. We reduce function call overhead and increase chances that
705 things fit in hardware return predictor.
707 Recursive inlining might however increase cost of stack frame setup
708 actually slowing down functions whose recursion tree is wide rather than
711 Deciding reliably on when to do recursive inlining without profile feedback
712 is tricky. For now we disable recursive inlining when probability of self
715 Recursive inlining of self recursive call within loop also results in large loop
716 depths that generally optimize badly. We may want to throttle down inlining
717 in those cases. In particular this seems to happen in one of libstdc++ rb tree
722 && (edge
->count
* 100 / outer_node
->count
723 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY
)))
725 reason
= "profile of recursive call is too small";
729 && (edge
->frequency
* 100 / caller_freq
730 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY
)))
732 reason
= "frequency of recursive call is too small";
736 if (!want_inline
&& dump_file
)
737 fprintf (dump_file
, " not inlining recursively: %s\n", reason
);
741 /* Return true when NODE has caller other than EDGE.
742 Worker for cgraph_for_node_and_aliases. */
745 check_caller_edge (struct cgraph_node
*node
, void *edge
)
747 return (node
->callers
748 && node
->callers
!= edge
);
752 /* Decide if inlining NODE would reduce unit size by eliminating
753 the offline copy of function.
754 When COLD is true the cold calls are considered, too. */
757 want_inline_function_to_all_callers_p (struct cgraph_node
*node
, bool cold
)
759 struct cgraph_node
*function
= cgraph_function_or_thunk_node (node
, NULL
);
760 struct cgraph_edge
*e
;
761 bool has_hot_call
= false;
763 /* Does it have callers? */
766 /* Already inlined? */
767 if (function
->global
.inlined_to
)
769 if (cgraph_function_or_thunk_node (node
, NULL
) != node
)
771 /* Inlining into all callers would increase size? */
772 if (estimate_growth (node
) > 0)
774 /* Maybe other aliases has more direct calls. */
775 if (cgraph_for_node_and_aliases (node
, check_caller_edge
, node
->callers
, true))
777 /* All inlines must be possible. */
778 for (e
= node
->callers
; e
; e
= e
->next_caller
)
780 if (!can_inline_edge_p (e
, true))
782 if (!has_hot_call
&& cgraph_maybe_hot_edge_p (e
))
786 if (!cold
&& !has_hot_call
)
791 #define RELATIVE_TIME_BENEFIT_RANGE (INT_MAX / 64)
793 /* Return relative time improvement for inlining EDGE in range
794 1...RELATIVE_TIME_BENEFIT_RANGE */
797 relative_time_benefit (struct inline_summary
*callee_info
,
798 struct cgraph_edge
*edge
,
801 gcov_type relbenefit
;
802 gcov_type uninlined_call_time
= compute_uninlined_call_time (callee_info
, edge
);
803 gcov_type inlined_call_time
= compute_inlined_call_time (edge
, edge_time
);
805 /* Inlining into extern inline function is not a win. */
806 if (DECL_EXTERNAL (edge
->caller
->global
.inlined_to
807 ? edge
->caller
->global
.inlined_to
->symbol
.decl
808 : edge
->caller
->symbol
.decl
))
811 /* Watch overflows. */
812 gcc_checking_assert (uninlined_call_time
>= 0);
813 gcc_checking_assert (inlined_call_time
>= 0);
814 gcc_checking_assert (uninlined_call_time
>= inlined_call_time
);
816 /* Compute relative time benefit, i.e. how much the call becomes faster.
817 ??? perhaps computing how much the caller+calle together become faster
818 would lead to more realistic results. */
819 if (!uninlined_call_time
)
820 uninlined_call_time
= 1;
822 RDIV (((gcov_type
)uninlined_call_time
- inlined_call_time
) * RELATIVE_TIME_BENEFIT_RANGE
,
823 uninlined_call_time
);
824 relbenefit
= MIN (relbenefit
, RELATIVE_TIME_BENEFIT_RANGE
);
825 gcc_checking_assert (relbenefit
>= 0);
826 relbenefit
= MAX (relbenefit
, 1);
831 /* A cost model driving the inlining heuristics in a way so the edges with
832 smallest badness are inlined first. After each inlining is performed
833 the costs of all caller edges of nodes affected are recomputed so the
834 metrics may accurately depend on values such as number of inlinable callers
835 of the function or function body size. */
838 edge_badness (struct cgraph_edge
*edge
, bool dump
)
841 int growth
, edge_time
;
842 struct cgraph_node
*callee
= cgraph_function_or_thunk_node (edge
->callee
,
844 struct inline_summary
*callee_info
= inline_summary (callee
);
847 if (DECL_DISREGARD_INLINE_LIMITS (callee
->symbol
.decl
))
850 growth
= estimate_edge_growth (edge
);
851 edge_time
= estimate_edge_time (edge
);
852 hints
= estimate_edge_hints (edge
);
853 gcc_checking_assert (edge_time
>= 0);
854 gcc_checking_assert (edge_time
<= callee_info
->time
);
855 gcc_checking_assert (growth
<= callee_info
->size
);
859 fprintf (dump_file
, " Badness calculation for %s/%i -> %s/%i\n",
860 xstrdup (cgraph_node_name (edge
->caller
)),
861 edge
->caller
->symbol
.order
,
862 xstrdup (cgraph_node_name (callee
)),
863 edge
->callee
->symbol
.order
);
864 fprintf (dump_file
, " size growth %i, time %i ",
867 dump_inline_hints (dump_file
, hints
);
868 if (big_speedup_p (edge
))
869 fprintf (dump_file
, " big_speedup");
870 fprintf (dump_file
, "\n");
873 /* Always prefer inlining saving code size. */
876 badness
= INT_MIN
/ 2 + growth
;
878 fprintf (dump_file
, " %i: Growth %i <= 0\n", (int) badness
,
882 /* When profiling is available, compute badness as:
884 relative_edge_count * relative_time_benefit
885 goodness = -------------------------------------------
889 The fraction is upside down, because on edge counts and time beneits
890 the bounds are known. Edge growth is essentially unlimited. */
894 int relbenefit
= relative_time_benefit (callee_info
, edge
, edge_time
);
897 ((double) edge
->count
* INT_MIN
/ 2 / max_count
/ RELATIVE_TIME_BENEFIT_RANGE
) *
898 relbenefit
) / growth
;
900 /* Be sure that insanity of the profile won't lead to increasing counts
901 in the scalling and thus to overflow in the computation above. */
902 gcc_assert (max_count
>= edge
->count
);
906 " %i (relative %f): profile info. Relative count %f"
907 " * Relative benefit %f\n",
908 (int) badness
, (double) badness
/ INT_MIN
,
909 (double) edge
->count
/ max_count
,
910 relbenefit
* 100.0 / RELATIVE_TIME_BENEFIT_RANGE
);
914 /* When function local profile is available. Compute badness as:
916 relative_time_benefit
917 goodness = ---------------------------------
918 growth_of_caller * overall_growth
922 compensated by the inline hints.
924 else if (flag_guess_branch_prob
)
926 badness
= (relative_time_benefit (callee_info
, edge
, edge_time
)
927 * (INT_MIN
/ 16 / RELATIVE_TIME_BENEFIT_RANGE
));
928 badness
/= (MIN (65536/2, growth
) * MIN (65536/2, MAX (1, callee_info
->growth
)));
929 gcc_checking_assert (badness
<=0 && badness
>= INT_MIN
/ 16);
930 if ((hints
& (INLINE_HINT_indirect_call
931 | INLINE_HINT_loop_iterations
932 | INLINE_HINT_array_index
933 | INLINE_HINT_loop_stride
))
934 || callee_info
->growth
<= 0)
936 if (hints
& (INLINE_HINT_same_scc
))
938 else if (hints
& (INLINE_HINT_in_scc
))
940 else if (hints
& (INLINE_HINT_cross_module
))
942 gcc_checking_assert (badness
<= 0 && badness
>= INT_MIN
/ 2);
943 if ((hints
& INLINE_HINT_declared_inline
) && badness
>= INT_MIN
/ 32)
948 " %i: guessed profile. frequency %f,"
949 " benefit %f%%, time w/o inlining %i, time w inlining %i"
950 " overall growth %i (current) %i (original)\n",
951 (int) badness
, (double)edge
->frequency
/ CGRAPH_FREQ_BASE
,
952 relative_time_benefit (callee_info
, edge
, edge_time
) * 100.0
953 / RELATIVE_TIME_BENEFIT_RANGE
,
954 (int)compute_uninlined_call_time (callee_info
, edge
),
955 (int)compute_inlined_call_time (edge
, edge_time
),
956 estimate_growth (callee
),
957 callee_info
->growth
);
960 /* When function local profile is not available or it does not give
961 useful information (ie frequency is zero), base the cost on
962 loop nest and overall size growth, so we optimize for overall number
963 of functions fully inlined in program. */
966 int nest
= MIN (inline_edge_summary (edge
)->loop_depth
, 8);
967 badness
= growth
* 256;
969 /* Decrease badness if call is nested. */
977 fprintf (dump_file
, " %i: no profile. nest %i\n", (int) badness
,
981 /* Ensure that we did not overflow in all the fixed point math above. */
982 gcc_assert (badness
>= INT_MIN
);
983 gcc_assert (badness
<= INT_MAX
- 1);
984 /* Make recursive inlining happen always after other inlining is done. */
985 if (cgraph_edge_recursive_p (edge
))
991 /* Recompute badness of EDGE and update its key in HEAP if needed. */
993 update_edge_key (fibheap_t heap
, struct cgraph_edge
*edge
)
995 int badness
= edge_badness (edge
, false);
998 fibnode_t n
= (fibnode_t
) edge
->aux
;
999 gcc_checking_assert (n
->data
== edge
);
1001 /* fibheap_replace_key only decrease the keys.
1002 When we increase the key we do not update heap
1003 and instead re-insert the element once it becomes
1004 a minimum of heap. */
1005 if (badness
< n
->key
)
1007 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1010 " decreasing badness %s/%i -> %s/%i, %i to %i\n",
1011 xstrdup (cgraph_node_name (edge
->caller
)),
1012 edge
->caller
->symbol
.order
,
1013 xstrdup (cgraph_node_name (edge
->callee
)),
1014 edge
->callee
->symbol
.order
,
1018 fibheap_replace_key (heap
, n
, badness
);
1019 gcc_checking_assert (n
->key
== badness
);
1024 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1027 " enqueuing call %s/%i -> %s/%i, badness %i\n",
1028 xstrdup (cgraph_node_name (edge
->caller
)),
1029 edge
->caller
->symbol
.order
,
1030 xstrdup (cgraph_node_name (edge
->callee
)),
1031 edge
->callee
->symbol
.order
,
1034 edge
->aux
= fibheap_insert (heap
, badness
, edge
);
1039 /* NODE was inlined.
1040 All caller edges needs to be resetted because
1041 size estimates change. Similarly callees needs reset
1042 because better context may be known. */
1045 reset_edge_caches (struct cgraph_node
*node
)
1047 struct cgraph_edge
*edge
;
1048 struct cgraph_edge
*e
= node
->callees
;
1049 struct cgraph_node
*where
= node
;
1051 struct ipa_ref
*ref
;
1053 if (where
->global
.inlined_to
)
1054 where
= where
->global
.inlined_to
;
1056 /* WHERE body size has changed, the cached growth is invalid. */
1057 reset_node_growth_cache (where
);
1059 for (edge
= where
->callers
; edge
; edge
= edge
->next_caller
)
1060 if (edge
->inline_failed
)
1061 reset_edge_growth_cache (edge
);
1062 for (i
= 0; ipa_ref_list_referring_iterate (&where
->symbol
.ref_list
,
1064 if (ref
->use
== IPA_REF_ALIAS
)
1065 reset_edge_caches (ipa_ref_referring_node (ref
));
1071 if (!e
->inline_failed
&& e
->callee
->callees
)
1072 e
= e
->callee
->callees
;
1075 if (e
->inline_failed
)
1076 reset_edge_growth_cache (e
);
1083 if (e
->caller
== node
)
1085 e
= e
->caller
->callers
;
1087 while (!e
->next_callee
);
1093 /* Recompute HEAP nodes for each of caller of NODE.
1094 UPDATED_NODES track nodes we already visited, to avoid redundant work.
1095 When CHECK_INLINABLITY_FOR is set, re-check for specified edge that
1096 it is inlinable. Otherwise check all edges. */
1099 update_caller_keys (fibheap_t heap
, struct cgraph_node
*node
,
1100 bitmap updated_nodes
,
1101 struct cgraph_edge
*check_inlinablity_for
)
1103 struct cgraph_edge
*edge
;
1105 struct ipa_ref
*ref
;
1107 if ((!node
->symbol
.alias
&& !inline_summary (node
)->inlinable
)
1108 || node
->global
.inlined_to
)
1110 if (!bitmap_set_bit (updated_nodes
, node
->uid
))
1113 for (i
= 0; ipa_ref_list_referring_iterate (&node
->symbol
.ref_list
,
1115 if (ref
->use
== IPA_REF_ALIAS
)
1117 struct cgraph_node
*alias
= ipa_ref_referring_node (ref
);
1118 update_caller_keys (heap
, alias
, updated_nodes
, check_inlinablity_for
);
1121 for (edge
= node
->callers
; edge
; edge
= edge
->next_caller
)
1122 if (edge
->inline_failed
)
1124 if (!check_inlinablity_for
1125 || check_inlinablity_for
== edge
)
1127 if (can_inline_edge_p (edge
, false)
1128 && want_inline_small_function_p (edge
, false))
1129 update_edge_key (heap
, edge
);
1132 report_inline_failed_reason (edge
);
1133 fibheap_delete_node (heap
, (fibnode_t
) edge
->aux
);
1138 update_edge_key (heap
, edge
);
1142 /* Recompute HEAP nodes for each uninlined call in NODE.
1143 This is used when we know that edge badnesses are going only to increase
1144 (we introduced new call site) and thus all we need is to insert newly
1145 created edges into heap. */
1148 update_callee_keys (fibheap_t heap
, struct cgraph_node
*node
,
1149 bitmap updated_nodes
)
1151 struct cgraph_edge
*e
= node
->callees
;
1156 if (!e
->inline_failed
&& e
->callee
->callees
)
1157 e
= e
->callee
->callees
;
1160 enum availability avail
;
1161 struct cgraph_node
*callee
;
1162 /* We do not reset callee growth cache here. Since we added a new call,
1163 growth chould have just increased and consequentely badness metric
1164 don't need updating. */
1165 if (e
->inline_failed
1166 && (callee
= cgraph_function_or_thunk_node (e
->callee
, &avail
))
1167 && inline_summary (callee
)->inlinable
1168 && avail
>= AVAIL_AVAILABLE
1169 && !bitmap_bit_p (updated_nodes
, callee
->uid
))
1171 if (can_inline_edge_p (e
, false)
1172 && want_inline_small_function_p (e
, false))
1173 update_edge_key (heap
, e
);
1176 report_inline_failed_reason (e
);
1177 fibheap_delete_node (heap
, (fibnode_t
) e
->aux
);
1187 if (e
->caller
== node
)
1189 e
= e
->caller
->callers
;
1191 while (!e
->next_callee
);
1197 /* Enqueue all recursive calls from NODE into priority queue depending on
1198 how likely we want to recursively inline the call. */
1201 lookup_recursive_calls (struct cgraph_node
*node
, struct cgraph_node
*where
,
1204 struct cgraph_edge
*e
;
1205 enum availability avail
;
1207 for (e
= where
->callees
; e
; e
= e
->next_callee
)
1208 if (e
->callee
== node
1209 || (cgraph_function_or_thunk_node (e
->callee
, &avail
) == node
1210 && avail
> AVAIL_OVERWRITABLE
))
1212 /* When profile feedback is available, prioritize by expected number
1214 fibheap_insert (heap
,
1215 !max_count
? -e
->frequency
1216 : -(e
->count
/ ((max_count
+ (1<<24) - 1) / (1<<24))),
1219 for (e
= where
->callees
; e
; e
= e
->next_callee
)
1220 if (!e
->inline_failed
)
1221 lookup_recursive_calls (node
, e
->callee
, heap
);
1224 /* Decide on recursive inlining: in the case function has recursive calls,
1225 inline until body size reaches given argument. If any new indirect edges
1226 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1230 recursive_inlining (struct cgraph_edge
*edge
,
1231 vec
<cgraph_edge_p
> *new_edges
)
1233 int limit
= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO
);
1235 struct cgraph_node
*node
;
1236 struct cgraph_edge
*e
;
1237 struct cgraph_node
*master_clone
= NULL
, *next
;
1241 node
= edge
->caller
;
1242 if (node
->global
.inlined_to
)
1243 node
= node
->global
.inlined_to
;
1245 if (DECL_DECLARED_INLINE_P (node
->symbol
.decl
))
1246 limit
= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE
);
1248 /* Make sure that function is small enough to be considered for inlining. */
1249 if (estimate_size_after_inlining (node
, edge
) >= limit
)
1251 heap
= fibheap_new ();
1252 lookup_recursive_calls (node
, node
, heap
);
1253 if (fibheap_empty (heap
))
1255 fibheap_delete (heap
);
1261 " Performing recursive inlining on %s\n",
1262 cgraph_node_name (node
));
1264 /* Do the inlining and update list of recursive call during process. */
1265 while (!fibheap_empty (heap
))
1267 struct cgraph_edge
*curr
1268 = (struct cgraph_edge
*) fibheap_extract_min (heap
);
1269 struct cgraph_node
*cnode
, *dest
= curr
->callee
;
1271 if (!can_inline_edge_p (curr
, true))
1274 /* MASTER_CLONE is produced in the case we already started modified
1275 the function. Be sure to redirect edge to the original body before
1276 estimating growths otherwise we will be seeing growths after inlining
1277 the already modified body. */
1280 cgraph_redirect_edge_callee (curr
, master_clone
);
1281 reset_edge_growth_cache (curr
);
1284 if (estimate_size_after_inlining (node
, curr
) > limit
)
1286 cgraph_redirect_edge_callee (curr
, dest
);
1287 reset_edge_growth_cache (curr
);
1292 for (cnode
= curr
->caller
;
1293 cnode
->global
.inlined_to
; cnode
= cnode
->callers
->caller
)
1294 if (node
->symbol
.decl
1295 == cgraph_function_or_thunk_node (curr
->callee
, NULL
)->symbol
.decl
)
1298 if (!want_inline_self_recursive_call_p (curr
, node
, false, depth
))
1300 cgraph_redirect_edge_callee (curr
, dest
);
1301 reset_edge_growth_cache (curr
);
1308 " Inlining call of depth %i", depth
);
1311 fprintf (dump_file
, " called approx. %.2f times per call",
1312 (double)curr
->count
/ node
->count
);
1314 fprintf (dump_file
, "\n");
1318 /* We need original clone to copy around. */
1319 master_clone
= cgraph_clone_node (node
, node
->symbol
.decl
,
1320 node
->count
, CGRAPH_FREQ_BASE
,
1321 false, vNULL
, true, NULL
);
1322 for (e
= master_clone
->callees
; e
; e
= e
->next_callee
)
1323 if (!e
->inline_failed
)
1324 clone_inlined_nodes (e
, true, false, NULL
);
1325 cgraph_redirect_edge_callee (curr
, master_clone
);
1326 reset_edge_growth_cache (curr
);
1329 inline_call (curr
, false, new_edges
, &overall_size
, true);
1330 lookup_recursive_calls (node
, curr
->callee
, heap
);
1334 if (!fibheap_empty (heap
) && dump_file
)
1335 fprintf (dump_file
, " Recursive inlining growth limit met.\n");
1336 fibheap_delete (heap
);
1343 "\n Inlined %i times, "
1344 "body grown from size %i to %i, time %i to %i\n", n
,
1345 inline_summary (master_clone
)->size
, inline_summary (node
)->size
,
1346 inline_summary (master_clone
)->time
, inline_summary (node
)->time
);
1348 /* Remove master clone we used for inlining. We rely that clones inlined
1349 into master clone gets queued just before master clone so we don't
1351 for (node
= cgraph_first_function (); node
!= master_clone
;
1354 next
= cgraph_next_function (node
);
1355 if (node
->global
.inlined_to
== master_clone
)
1356 cgraph_remove_node (node
);
1358 cgraph_remove_node (master_clone
);
1363 /* Given whole compilation unit estimate of INSNS, compute how large we can
1364 allow the unit to grow. */
1367 compute_max_insns (int insns
)
1369 int max_insns
= insns
;
1370 if (max_insns
< PARAM_VALUE (PARAM_LARGE_UNIT_INSNS
))
1371 max_insns
= PARAM_VALUE (PARAM_LARGE_UNIT_INSNS
);
1373 return ((HOST_WIDEST_INT
) max_insns
1374 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH
)) / 100);
1378 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1381 add_new_edges_to_heap (fibheap_t heap
, vec
<cgraph_edge_p
> new_edges
)
1383 while (new_edges
.length () > 0)
1385 struct cgraph_edge
*edge
= new_edges
.pop ();
1387 gcc_assert (!edge
->aux
);
1388 if (edge
->inline_failed
1389 && can_inline_edge_p (edge
, true)
1390 && want_inline_small_function_p (edge
, true))
1391 edge
->aux
= fibheap_insert (heap
, edge_badness (edge
, false), edge
);
1395 /* Remove EDGE from the fibheap. */
1398 heap_edge_removal_hook (struct cgraph_edge
*e
, void *data
)
1402 fibheap_delete_node ((fibheap_t
)data
, (fibnode_t
)e
->aux
);
1407 /* Return true if speculation of edge E seems useful.
1408 If ANTICIPATE_INLINING is true, be conservative and hope that E
1412 speculation_useful_p (struct cgraph_edge
*e
, bool anticipate_inlining
)
1414 enum availability avail
;
1415 struct cgraph_node
*target
= cgraph_function_or_thunk_node (e
->callee
, &avail
);
1416 struct cgraph_edge
*direct
, *indirect
;
1417 struct ipa_ref
*ref
;
1419 gcc_assert (e
->speculative
&& !e
->indirect_unknown_callee
);
1421 if (!cgraph_maybe_hot_edge_p (e
))
1424 /* See if IP optimizations found something potentially useful about the
1425 function. For now we look only for CONST/PURE flags. Almost everything
1426 else we propagate is useless. */
1427 if (avail
>= AVAIL_AVAILABLE
)
1429 int ecf_flags
= flags_from_decl_or_type (target
->symbol
.decl
);
1430 if (ecf_flags
& ECF_CONST
)
1432 cgraph_speculative_call_info (e
, direct
, indirect
, ref
);
1433 if (!(indirect
->indirect_info
->ecf_flags
& ECF_CONST
))
1436 else if (ecf_flags
& ECF_PURE
)
1438 cgraph_speculative_call_info (e
, direct
, indirect
, ref
);
1439 if (!(indirect
->indirect_info
->ecf_flags
& ECF_PURE
))
1443 /* If we did not managed to inline the function nor redirect
1444 to an ipa-cp clone (that are seen by having local flag set),
1445 it is probably pointless to inline it unless hardware is missing
1446 indirect call predictor. */
1447 if (!anticipate_inlining
&& e
->inline_failed
&& !target
->local
.local
)
1449 /* For overwritable targets there is not much to do. */
1450 if (e
->inline_failed
&& !can_inline_edge_p (e
, false, true))
1452 /* OK, speculation seems interesting. */
1456 /* We know that EDGE is not going to be inlined.
1457 See if we can remove speculation. */
1460 resolve_noninline_speculation (fibheap_t edge_heap
, struct cgraph_edge
*edge
)
1462 if (edge
->speculative
&& !speculation_useful_p (edge
, false))
1464 struct cgraph_node
*node
= edge
->caller
;
1465 struct cgraph_node
*where
= node
->global
.inlined_to
1466 ? node
->global
.inlined_to
: node
;
1467 bitmap updated_nodes
= BITMAP_ALLOC (NULL
);
1469 cgraph_resolve_speculation (edge
, NULL
);
1470 reset_node_growth_cache (where
);
1471 reset_edge_caches (where
);
1472 inline_update_overall_summary (where
);
1473 update_caller_keys (edge_heap
, where
,
1474 updated_nodes
, NULL
);
1475 reset_node_growth_cache (where
);
1476 BITMAP_FREE (updated_nodes
);
1480 /* We use greedy algorithm for inlining of small functions:
1481 All inline candidates are put into prioritized heap ordered in
1484 The inlining of small functions is bounded by unit growth parameters. */
1487 inline_small_functions (void)
1489 struct cgraph_node
*node
;
1490 struct cgraph_edge
*edge
;
1491 fibheap_t edge_heap
= fibheap_new ();
1492 bitmap updated_nodes
= BITMAP_ALLOC (NULL
);
1493 int min_size
, max_size
;
1494 vec
<cgraph_edge_p
> new_indirect_edges
= vNULL
;
1495 int initial_size
= 0;
1496 struct cgraph_node
**order
= XCNEWVEC (struct cgraph_node
*, cgraph_n_nodes
);
1497 struct cgraph_edge_hook_list
*edge_removal_hook_holder
;
1499 if (flag_indirect_inlining
)
1500 new_indirect_edges
.create (8);
1502 edge_removal_hook_holder
1503 = cgraph_add_edge_removal_hook (&heap_edge_removal_hook
, edge_heap
);
1505 /* Compute overall unit size and other global parameters used by badness
1509 ipa_reduced_postorder (order
, true, true, NULL
);
1512 FOR_EACH_DEFINED_FUNCTION (node
)
1513 if (!node
->global
.inlined_to
)
1515 if (cgraph_function_with_gimple_body_p (node
)
1516 || node
->thunk
.thunk_p
)
1518 struct inline_summary
*info
= inline_summary (node
);
1519 struct ipa_dfs_info
*dfs
= (struct ipa_dfs_info
*) node
->symbol
.aux
;
1521 if (!DECL_EXTERNAL (node
->symbol
.decl
))
1522 initial_size
+= info
->size
;
1523 info
->growth
= estimate_growth (node
);
1524 if (dfs
&& dfs
->next_cycle
)
1526 struct cgraph_node
*n2
;
1527 int id
= dfs
->scc_no
+ 1;
1529 n2
= ((struct ipa_dfs_info
*) node
->symbol
.aux
)->next_cycle
)
1531 struct inline_summary
*info2
= inline_summary (n2
);
1539 for (edge
= node
->callers
; edge
; edge
= edge
->next_caller
)
1540 if (max_count
< edge
->count
)
1541 max_count
= edge
->count
;
1543 ipa_free_postorder_info ();
1544 initialize_growth_caches ();
1548 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1551 overall_size
= initial_size
;
1552 max_size
= compute_max_insns (overall_size
);
1553 min_size
= overall_size
;
1555 /* Populate the heeap with all edges we might inline. */
1557 FOR_EACH_DEFINED_FUNCTION (node
)
1559 bool update
= false;
1560 struct cgraph_edge
*next
;
1563 fprintf (dump_file
, "Enqueueing calls in %s/%i.\n",
1564 cgraph_node_name (node
), node
->symbol
.order
);
1566 for (edge
= node
->callees
; edge
; edge
= next
)
1568 next
= edge
->next_callee
;
1569 if (edge
->inline_failed
1571 && can_inline_edge_p (edge
, true)
1572 && want_inline_small_function_p (edge
, true)
1573 && edge
->inline_failed
)
1575 gcc_assert (!edge
->aux
);
1576 update_edge_key (edge_heap
, edge
);
1578 if (edge
->speculative
&& !speculation_useful_p (edge
, edge
->aux
!= NULL
))
1580 cgraph_resolve_speculation (edge
, NULL
);
1586 struct cgraph_node
*where
= node
->global
.inlined_to
1587 ? node
->global
.inlined_to
: node
;
1588 inline_update_overall_summary (where
);
1589 reset_node_growth_cache (where
);
1590 reset_edge_caches (where
);
1591 update_caller_keys (edge_heap
, where
,
1592 updated_nodes
, NULL
);
1593 bitmap_clear (updated_nodes
);
1597 gcc_assert (in_lto_p
1599 || (profile_info
&& flag_branch_probabilities
));
1601 while (!fibheap_empty (edge_heap
))
1603 int old_size
= overall_size
;
1604 struct cgraph_node
*where
, *callee
;
1605 int badness
= fibheap_min_key (edge_heap
);
1606 int current_badness
;
1610 edge
= (struct cgraph_edge
*) fibheap_extract_min (edge_heap
);
1611 gcc_assert (edge
->aux
);
1613 if (!edge
->inline_failed
)
1616 /* Be sure that caches are maintained consistent.
1617 We can not make this ENABLE_CHECKING only because it cause different
1618 updates of the fibheap queue. */
1619 cached_badness
= edge_badness (edge
, false);
1620 reset_edge_growth_cache (edge
);
1621 reset_node_growth_cache (edge
->callee
);
1623 /* When updating the edge costs, we only decrease badness in the keys.
1624 Increases of badness are handled lazilly; when we see key with out
1625 of date value on it, we re-insert it now. */
1626 current_badness
= edge_badness (edge
, false);
1627 gcc_assert (cached_badness
== current_badness
);
1628 gcc_assert (current_badness
>= badness
);
1629 if (current_badness
!= badness
)
1631 edge
->aux
= fibheap_insert (edge_heap
, current_badness
, edge
);
1635 if (!can_inline_edge_p (edge
, true))
1637 resolve_noninline_speculation (edge_heap
, edge
);
1641 callee
= cgraph_function_or_thunk_node (edge
->callee
, NULL
);
1642 growth
= estimate_edge_growth (edge
);
1646 "\nConsidering %s/%i with %i size\n",
1647 cgraph_node_name (callee
), callee
->symbol
.order
,
1648 inline_summary (callee
)->size
);
1650 " to be inlined into %s/%i in %s:%i\n"
1651 " Estimated growth after inlined into all is %+i insns.\n"
1652 " Estimated badness is %i, frequency %.2f.\n",
1653 cgraph_node_name (edge
->caller
), edge
->caller
->symbol
.order
,
1654 flag_wpa
? "unknown"
1655 : gimple_filename ((const_gimple
) edge
->call_stmt
),
1657 : gimple_lineno ((const_gimple
) edge
->call_stmt
),
1658 estimate_growth (callee
),
1660 edge
->frequency
/ (double)CGRAPH_FREQ_BASE
);
1662 fprintf (dump_file
," Called "HOST_WIDEST_INT_PRINT_DEC
"x\n",
1664 if (dump_flags
& TDF_DETAILS
)
1665 edge_badness (edge
, true);
1668 if (overall_size
+ growth
> max_size
1669 && !DECL_DISREGARD_INLINE_LIMITS (callee
->symbol
.decl
))
1671 edge
->inline_failed
= CIF_INLINE_UNIT_GROWTH_LIMIT
;
1672 report_inline_failed_reason (edge
);
1673 resolve_noninline_speculation (edge_heap
, edge
);
1677 if (!want_inline_small_function_p (edge
, true))
1679 resolve_noninline_speculation (edge_heap
, edge
);
1683 /* Heuristics for inlining small functions works poorly for
1684 recursive calls where we do efect similar to loop unrolling.
1685 When inliing such edge seems profitable, leave decision on
1686 specific inliner. */
1687 if (cgraph_edge_recursive_p (edge
))
1689 where
= edge
->caller
;
1690 if (where
->global
.inlined_to
)
1691 where
= where
->global
.inlined_to
;
1692 if (!recursive_inlining (edge
,
1693 flag_indirect_inlining
1694 ? &new_indirect_edges
: NULL
))
1696 edge
->inline_failed
= CIF_RECURSIVE_INLINING
;
1697 resolve_noninline_speculation (edge_heap
, edge
);
1700 reset_edge_caches (where
);
1701 /* Recursive inliner inlines all recursive calls of the function
1702 at once. Consequently we need to update all callee keys. */
1703 if (flag_indirect_inlining
)
1704 add_new_edges_to_heap (edge_heap
, new_indirect_edges
);
1705 update_callee_keys (edge_heap
, where
, updated_nodes
);
1706 bitmap_clear (updated_nodes
);
1710 struct cgraph_node
*outer_node
= NULL
;
1713 /* Consider the case where self recursive function A is inlined into B.
1714 This is desired optimization in some cases, since it leads to effect
1715 similar of loop peeling and we might completely optimize out the
1716 recursive call. However we must be extra selective. */
1718 where
= edge
->caller
;
1719 while (where
->global
.inlined_to
)
1721 if (where
->symbol
.decl
== callee
->symbol
.decl
)
1722 outer_node
= where
, depth
++;
1723 where
= where
->callers
->caller
;
1726 && !want_inline_self_recursive_call_p (edge
, outer_node
,
1730 = (DECL_DISREGARD_INLINE_LIMITS (edge
->callee
->symbol
.decl
)
1731 ? CIF_RECURSIVE_INLINING
: CIF_UNSPECIFIED
);
1732 resolve_noninline_speculation (edge_heap
, edge
);
1735 else if (depth
&& dump_file
)
1736 fprintf (dump_file
, " Peeling recursion with depth %i\n", depth
);
1738 gcc_checking_assert (!callee
->global
.inlined_to
);
1739 inline_call (edge
, true, &new_indirect_edges
, &overall_size
, true);
1740 if (flag_indirect_inlining
)
1741 add_new_edges_to_heap (edge_heap
, new_indirect_edges
);
1743 reset_edge_caches (edge
->callee
);
1744 reset_node_growth_cache (callee
);
1746 update_callee_keys (edge_heap
, where
, updated_nodes
);
1748 where
= edge
->caller
;
1749 if (where
->global
.inlined_to
)
1750 where
= where
->global
.inlined_to
;
1752 /* Our profitability metric can depend on local properties
1753 such as number of inlinable calls and size of the function body.
1754 After inlining these properties might change for the function we
1755 inlined into (since it's body size changed) and for the functions
1756 called by function we inlined (since number of it inlinable callers
1758 update_caller_keys (edge_heap
, where
, updated_nodes
, NULL
);
1759 bitmap_clear (updated_nodes
);
1764 " Inlined into %s which now has time %i and size %i,"
1765 "net change of %+i.\n",
1766 cgraph_node_name (edge
->caller
),
1767 inline_summary (edge
->caller
)->time
,
1768 inline_summary (edge
->caller
)->size
,
1769 overall_size
- old_size
);
1771 if (min_size
> overall_size
)
1773 min_size
= overall_size
;
1774 max_size
= compute_max_insns (min_size
);
1777 fprintf (dump_file
, "New minimal size reached: %i\n", min_size
);
1781 free_growth_caches ();
1782 new_indirect_edges
.release ();
1783 fibheap_delete (edge_heap
);
1786 "Unit growth for small function inlining: %i->%i (%i%%)\n",
1787 initial_size
, overall_size
,
1788 initial_size
? overall_size
* 100 / (initial_size
) - 100: 0);
1789 BITMAP_FREE (updated_nodes
);
1790 cgraph_remove_edge_removal_hook (edge_removal_hook_holder
);
1793 /* Flatten NODE. Performed both during early inlining and
1794 at IPA inlining time. */
1797 flatten_function (struct cgraph_node
*node
, bool early
)
1799 struct cgraph_edge
*e
;
1801 /* We shouldn't be called recursively when we are being processed. */
1802 gcc_assert (node
->symbol
.aux
== NULL
);
1804 node
->symbol
.aux
= (void *) node
;
1806 for (e
= node
->callees
; e
; e
= e
->next_callee
)
1808 struct cgraph_node
*orig_callee
;
1809 struct cgraph_node
*callee
= cgraph_function_or_thunk_node (e
->callee
, NULL
);
1811 /* We've hit cycle? It is time to give up. */
1812 if (callee
->symbol
.aux
)
1816 "Not inlining %s into %s to avoid cycle.\n",
1817 xstrdup (cgraph_node_name (callee
)),
1818 xstrdup (cgraph_node_name (e
->caller
)));
1819 e
->inline_failed
= CIF_RECURSIVE_INLINING
;
1823 /* When the edge is already inlined, we just need to recurse into
1824 it in order to fully flatten the leaves. */
1825 if (!e
->inline_failed
)
1827 flatten_function (callee
, early
);
1831 /* Flatten attribute needs to be processed during late inlining. For
1832 extra code quality we however do flattening during early optimization,
1835 ? !can_inline_edge_p (e
, true)
1836 : !can_early_inline_edge_p (e
))
1839 if (cgraph_edge_recursive_p (e
))
1842 fprintf (dump_file
, "Not inlining: recursive call.\n");
1846 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node
->symbol
.decl
))
1847 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee
->symbol
.decl
)))
1850 fprintf (dump_file
, "Not inlining: SSA form does not match.\n");
1854 /* Inline the edge and flatten the inline clone. Avoid
1855 recursing through the original node if the node was cloned. */
1857 fprintf (dump_file
, " Inlining %s into %s.\n",
1858 xstrdup (cgraph_node_name (callee
)),
1859 xstrdup (cgraph_node_name (e
->caller
)));
1860 orig_callee
= callee
;
1861 inline_call (e
, true, NULL
, NULL
, false);
1862 if (e
->callee
!= orig_callee
)
1863 orig_callee
->symbol
.aux
= (void *) node
;
1864 flatten_function (e
->callee
, early
);
1865 if (e
->callee
!= orig_callee
)
1866 orig_callee
->symbol
.aux
= NULL
;
1869 node
->symbol
.aux
= NULL
;
1870 if (!node
->global
.inlined_to
)
1871 inline_update_overall_summary (node
);
1874 /* Decide on the inlining. We do so in the topological order to avoid
1875 expenses on updating data structures. */
1880 struct cgraph_node
*node
;
1882 struct cgraph_node
**order
=
1883 XCNEWVEC (struct cgraph_node
*, cgraph_n_nodes
);
1887 if (in_lto_p
&& optimize
)
1888 ipa_update_after_lto_read ();
1891 dump_inline_summaries (dump_file
);
1893 nnodes
= ipa_reverse_postorder (order
);
1895 FOR_EACH_FUNCTION (node
)
1896 node
->symbol
.aux
= 0;
1899 fprintf (dump_file
, "\nFlattening functions:\n");
1901 /* In the first pass handle functions to be flattened. Do this with
1902 a priority so none of our later choices will make this impossible. */
1903 for (i
= nnodes
- 1; i
>= 0; i
--)
1907 /* Handle nodes to be flattened.
1908 Ideally when processing callees we stop inlining at the
1909 entry of cycles, possibly cloning that entry point and
1910 try to flatten itself turning it into a self-recursive
1912 if (lookup_attribute ("flatten",
1913 DECL_ATTRIBUTES (node
->symbol
.decl
)) != NULL
)
1917 "Flattening %s\n", cgraph_node_name (node
));
1918 flatten_function (node
, false);
1922 inline_small_functions ();
1924 /* Do first after-inlining removal. We want to remove all "stale" extern inline
1925 functions and virtual functions so we really know what is called once. */
1926 symtab_remove_unreachable_nodes (false, dump_file
);
1929 /* Inline functions with a property that after inlining into all callers the
1930 code size will shrink because the out-of-line copy is eliminated.
1931 We do this regardless on the callee size as long as function growth limits
1935 "\nDeciding on functions to be inlined into all callers and removing useless speculations:\n");
1937 /* Inlining one function called once has good chance of preventing
1938 inlining other function into the same callee. Ideally we should
1939 work in priority order, but probably inlining hot functions first
1940 is good cut without the extra pain of maintaining the queue.
1942 ??? this is not really fitting the bill perfectly: inlining function
1943 into callee often leads to better optimization of callee due to
1944 increased context for optimization.
1945 For example if main() function calls a function that outputs help
1946 and then function that does the main optmization, we should inline
1947 the second with priority even if both calls are cold by themselves.
1949 We probably want to implement new predicate replacing our use of
1950 maybe_hot_edge interpreted as maybe_hot_edge || callee is known
1952 for (cold
= 0; cold
<= 1; cold
++)
1954 FOR_EACH_DEFINED_FUNCTION (node
)
1956 struct cgraph_edge
*edge
, *next
;
1959 for (edge
= node
->callees
; edge
; edge
= next
)
1961 next
= edge
->next_callee
;
1962 if (edge
->speculative
&& !speculation_useful_p (edge
, false))
1964 cgraph_resolve_speculation (edge
, NULL
);
1970 struct cgraph_node
*where
= node
->global
.inlined_to
1971 ? node
->global
.inlined_to
: node
;
1972 reset_node_growth_cache (where
);
1973 reset_edge_caches (where
);
1974 inline_update_overall_summary (where
);
1976 if (flag_inline_functions_called_once
1977 && want_inline_function_to_all_callers_p (node
, cold
))
1980 struct cgraph_edge
*e
;
1981 for (e
= node
->callers
; e
; e
= e
->next_caller
)
1983 while (node
->callers
&& !node
->global
.inlined_to
)
1985 struct cgraph_node
*caller
= node
->callers
->caller
;
1990 "\nInlining %s size %i.\n",
1991 cgraph_node_name (node
),
1992 inline_summary (node
)->size
);
1994 " Called once from %s %i insns.\n",
1995 cgraph_node_name (node
->callers
->caller
),
1996 inline_summary (node
->callers
->caller
)->size
);
1999 inline_call (node
->callers
, true, NULL
, NULL
, true);
2002 " Inlined into %s which now has %i size\n",
2003 cgraph_node_name (caller
),
2004 inline_summary (caller
)->size
);
2008 fprintf (dump_file
, "New calls found; giving up.\n");
2016 /* Free ipa-prop structures if they are no longer needed. */
2018 ipa_free_all_structures_after_iinln ();
2022 "\nInlined %i calls, eliminated %i functions\n\n",
2023 ncalls_inlined
, nfunctions_inlined
);
2026 dump_inline_summaries (dump_file
);
2027 /* In WPA we use inline summaries for partitioning process. */
2029 inline_free_summary ();
2033 /* Inline always-inline function calls in NODE. */
2036 inline_always_inline_functions (struct cgraph_node
*node
)
2038 struct cgraph_edge
*e
;
2039 bool inlined
= false;
2041 for (e
= node
->callees
; e
; e
= e
->next_callee
)
2043 struct cgraph_node
*callee
= cgraph_function_or_thunk_node (e
->callee
, NULL
);
2044 if (!DECL_DISREGARD_INLINE_LIMITS (callee
->symbol
.decl
))
2047 if (cgraph_edge_recursive_p (e
))
2050 fprintf (dump_file
, " Not inlining recursive call to %s.\n",
2051 cgraph_node_name (e
->callee
));
2052 e
->inline_failed
= CIF_RECURSIVE_INLINING
;
2056 if (!can_early_inline_edge_p (e
))
2058 /* Set inlined to true if the callee is marked "always_inline" but
2059 is not inlinable. This will allow flagging an error later in
2060 expand_call_inline in tree-inline.c. */
2061 if (lookup_attribute ("always_inline",
2062 DECL_ATTRIBUTES (callee
->symbol
.decl
)) != NULL
)
2068 fprintf (dump_file
, " Inlining %s into %s (always_inline).\n",
2069 xstrdup (cgraph_node_name (e
->callee
)),
2070 xstrdup (cgraph_node_name (e
->caller
)));
2071 inline_call (e
, true, NULL
, NULL
, false);
2075 inline_update_overall_summary (node
);
2080 /* Decide on the inlining. We do so in the topological order to avoid
2081 expenses on updating data structures. */
2084 early_inline_small_functions (struct cgraph_node
*node
)
2086 struct cgraph_edge
*e
;
2087 bool inlined
= false;
2089 for (e
= node
->callees
; e
; e
= e
->next_callee
)
2091 struct cgraph_node
*callee
= cgraph_function_or_thunk_node (e
->callee
, NULL
);
2092 if (!inline_summary (callee
)->inlinable
2093 || !e
->inline_failed
)
2096 /* Do not consider functions not declared inline. */
2097 if (!DECL_DECLARED_INLINE_P (callee
->symbol
.decl
)
2098 && !flag_inline_small_functions
2099 && !flag_inline_functions
)
2103 fprintf (dump_file
, "Considering inline candidate %s.\n",
2104 cgraph_node_name (callee
));
2106 if (!can_early_inline_edge_p (e
))
2109 if (cgraph_edge_recursive_p (e
))
2112 fprintf (dump_file
, " Not inlining: recursive call.\n");
2116 if (!want_early_inline_function_p (e
))
2120 fprintf (dump_file
, " Inlining %s into %s.\n",
2121 xstrdup (cgraph_node_name (callee
)),
2122 xstrdup (cgraph_node_name (e
->caller
)));
2123 inline_call (e
, true, NULL
, NULL
, true);
2130 /* Do inlining of small functions. Doing so early helps profiling and other
2131 passes to be somewhat more effective and avoids some code duplication in
2132 later real inlining pass for testcases with very many function calls. */
2134 early_inliner (void)
2136 struct cgraph_node
*node
= cgraph_get_node (current_function_decl
);
2137 struct cgraph_edge
*edge
;
2138 unsigned int todo
= 0;
2140 bool inlined
= false;
2145 /* Do nothing if datastructures for ipa-inliner are already computed. This
2146 happens when some pass decides to construct new function and
2147 cgraph_add_new_function calls lowering passes and early optimization on
2148 it. This may confuse ourself when early inliner decide to inline call to
2149 function clone, because function clones don't have parameter list in
2150 ipa-prop matching their signature. */
2151 if (ipa_node_params_vector
.exists ())
2154 #ifdef ENABLE_CHECKING
2155 verify_cgraph_node (node
);
2157 ipa_remove_all_references (&node
->symbol
.ref_list
);
2159 /* Even when not optimizing or not inlining inline always-inline
2161 inlined
= inline_always_inline_functions (node
);
2165 || !flag_early_inlining
2166 /* Never inline regular functions into always-inline functions
2167 during incremental inlining. This sucks as functions calling
2168 always inline functions will get less optimized, but at the
2169 same time inlining of functions calling always inline
2170 function into an always inline function might introduce
2171 cycles of edges to be always inlined in the callgraph.
2173 We might want to be smarter and just avoid this type of inlining. */
2174 || DECL_DISREGARD_INLINE_LIMITS (node
->symbol
.decl
))
2176 else if (lookup_attribute ("flatten",
2177 DECL_ATTRIBUTES (node
->symbol
.decl
)) != NULL
)
2179 /* When the function is marked to be flattened, recursively inline
2183 "Flattening %s\n", cgraph_node_name (node
));
2184 flatten_function (node
, true);
2189 /* We iterate incremental inlining to get trivial cases of indirect
2191 while (iterations
< PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS
)
2192 && early_inline_small_functions (node
))
2194 timevar_push (TV_INTEGRATION
);
2195 todo
|= optimize_inline_calls (current_function_decl
);
2197 /* Technically we ought to recompute inline parameters so the new
2198 iteration of early inliner works as expected. We however have
2199 values approximately right and thus we only need to update edge
2200 info that might be cleared out for newly discovered edges. */
2201 for (edge
= node
->callees
; edge
; edge
= edge
->next_callee
)
2203 struct inline_edge_summary
*es
= inline_edge_summary (edge
);
2205 = estimate_num_insns (edge
->call_stmt
, &eni_size_weights
);
2207 = estimate_num_insns (edge
->call_stmt
, &eni_time_weights
);
2208 if (edge
->callee
->symbol
.decl
2209 && !gimple_check_call_matching_types (
2210 edge
->call_stmt
, edge
->callee
->symbol
.decl
, false))
2211 edge
->call_stmt_cannot_inline_p
= true;
2213 timevar_pop (TV_INTEGRATION
);
2218 fprintf (dump_file
, "Iterations: %i\n", iterations
);
2223 timevar_push (TV_INTEGRATION
);
2224 todo
|= optimize_inline_calls (current_function_decl
);
2225 timevar_pop (TV_INTEGRATION
);
2228 cfun
->always_inline_functions_inlined
= true;
2235 const pass_data pass_data_early_inline
=
2237 GIMPLE_PASS
, /* type */
2238 "einline", /* name */
2239 OPTGROUP_INLINE
, /* optinfo_flags */
2240 false, /* has_gate */
2241 true, /* has_execute */
2242 TV_EARLY_INLINING
, /* tv_id */
2243 PROP_ssa
, /* properties_required */
2244 0, /* properties_provided */
2245 0, /* properties_destroyed */
2246 0, /* todo_flags_start */
2247 0, /* todo_flags_finish */
2250 class pass_early_inline
: public gimple_opt_pass
2253 pass_early_inline(gcc::context
*ctxt
)
2254 : gimple_opt_pass(pass_data_early_inline
, ctxt
)
2257 /* opt_pass methods: */
2258 unsigned int execute () { return early_inliner (); }
2260 }; // class pass_early_inline
2265 make_pass_early_inline (gcc::context
*ctxt
)
2267 return new pass_early_inline (ctxt
);
2271 /* When to run IPA inlining. Inlining of always-inline functions
2272 happens during early inlining.
2274 Enable inlining unconditionally at -flto. We need size estimates to
2275 drive partitioning. */
2278 gate_ipa_inline (void)
2280 return optimize
|| flag_lto
|| flag_wpa
;
2285 const pass_data pass_data_ipa_inline
=
2287 IPA_PASS
, /* type */
2288 "inline", /* name */
2289 OPTGROUP_INLINE
, /* optinfo_flags */
2290 true, /* has_gate */
2291 true, /* has_execute */
2292 TV_IPA_INLINING
, /* tv_id */
2293 0, /* properties_required */
2294 0, /* properties_provided */
2295 0, /* properties_destroyed */
2296 TODO_remove_functions
, /* todo_flags_start */
2297 ( TODO_dump_symtab
| TODO_remove_functions
), /* todo_flags_finish */
2300 class pass_ipa_inline
: public ipa_opt_pass_d
2303 pass_ipa_inline(gcc::context
*ctxt
)
2304 : ipa_opt_pass_d(pass_data_ipa_inline
, ctxt
,
2305 inline_generate_summary
, /* generate_summary */
2306 inline_write_summary
, /* write_summary */
2307 inline_read_summary
, /* read_summary */
2308 NULL
, /* write_optimization_summary */
2309 NULL
, /* read_optimization_summary */
2310 NULL
, /* stmt_fixup */
2311 0, /* function_transform_todo_flags_start */
2312 inline_transform
, /* function_transform */
2313 NULL
) /* variable_transform */
2316 /* opt_pass methods: */
2317 bool gate () { return gate_ipa_inline (); }
2318 unsigned int execute () { return ipa_inline (); }
2320 }; // class pass_ipa_inline
2325 make_pass_ipa_inline (gcc::context
*ctxt
)
2327 return new pass_ipa_inline (ctxt
);