/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

    The implementation of the inliner is organized as follows:

    inlining heuristics limits

      can_inline_edge_p allows checking that a particular inlining is allowed
      by the limits specified by the user (allowed function growth, growth
      and so on).

      Functions are inlined when it is obvious the result is profitable (such
      as functions called once or when inlining reduces code size).
      In addition to that we perform inlining of small functions and recursive
      inlining.

    inlining heuristics

      The inliner itself is split into two passes:

      pass_early_inlining

	Simple local inlining pass inlining callees into the current function.
	This pass makes no use of whole unit analysis and thus it can do only
	very simple decisions based on local properties.

	The strength of the pass is that it is run in topological order
	(reverse postorder) on the callgraph.  Functions are converted into SSA
	form just before this pass and optimized subsequently.  As a result,
	the callees of the function seen by the early inliner have already been
	optimized, and the results of early inlining add a lot of optimization
	opportunities for the local optimization.

	The pass handles the obvious inlining decisions within the compilation
	unit - inlining auto inline functions, inlining for size and
	flattening.

	The main strength of the pass is the ability to eliminate abstraction
	penalty in C++ code (via a combination of inlining and early
	optimization) and thus improve the quality of analysis done by the
	real IPA optimizers.

	Because of lack of whole unit knowledge, the pass can not really make
	good code size/performance tradeoffs.  It however does very simple
	speculative inlining allowing code size to grow by
	EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
	the optimizations performed later are very likely to eliminate the
	cost.

      pass_ipa_inline

	This is the real inliner able to handle inlining with whole program
	knowledge.  It performs the following steps:

	1) inlining of small functions.  This is implemented by a greedy
	algorithm ordering all inlinable cgraph edges by their badness and
	inlining them in this order as long as inline limits allow doing so.

	This heuristics is not very good at inlining recursive calls.
	Recursive calls can be inlined with results similar to loop unrolling.
	To do so, a special purpose recursive inliner is executed on the
	function when a recursive edge is met as a viable candidate.

	2) Unreachable functions are removed from the callgraph.  Inlining
	leads to devirtualization and other modification of the callgraph so
	functions may become unreachable during the process.  Also functions
	declared as extern inline or virtual functions are removed, since
	after inlining we no longer need the offline bodies.

	3) Functions called once and not exported from the unit are inlined.
	This should almost always lead to reduction of code size by
	eliminating the need for the offline copy of the function.  */
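/* A minimal sketch of the greedy "small function" loop described above, in
   terms of the helpers defined later in this file (cache resets, recursive
   inlining and key updates are omitted for brevity; this is illustrative,
   not the actual control flow):

       heap = fibheap_new ();
       for each call edge E in the unit
	 if (can_inline_edge_p (E, true) && want_inline_small_function_p (E, true))
	   fibheap_insert (heap, edge_badness (E, false), E);
       while (!fibheap_empty (heap))
	 {
	   E = fibheap_extract_min (heap);
	   if (overall_size + estimate_edge_growth (E) <= max_size)
	     inline_call (E, ...);
	 }

   See inline_small_functions below for the real implementation.  */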
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "coverage.h"
#include "ggc.h"
#include "rtl.h"
#include "tree-flow.h"
#include "ipa-prop.h"
#include "except.h"
#include "target.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
/* Statistics we collect about inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into a tiny wrapper, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on stack usage
   of the caller.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = cgraph_function_or_thunk_node (e->callee, NULL);
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  struct inline_summary *info, *what_info, *outer_info = inline_summary (to);

  /* Look for the function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      info = inline_summary (to);
      if (limit < info->self_size)
	limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
	stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
	to = to->callers->caller;
      else
	break;
    }

  what_info = inline_summary (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
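/* Worked example with illustrative numbers: if the largest function on the
   inline path has self_size 100 and --param large-function-growth is 100,
   the limit becomes 100 + 100 * 100 / 100 = 200.  Inlining is rejected with
   CIF_LARGE_FUNCTION_GROWTH_LIMIT only when the estimated size also exceeds
   --param large-function-insns, so small functions are never constrained by
   the relative limit alone.  */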
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    {
      fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
	       xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
	       xstrdup (cgraph_node_name (e->callee)), e->callee->uid,
	       cgraph_inline_failed_string (e->inline_failed));
    }
}
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output reason to the dump file.  */
static bool
can_inline_edge_p (struct cgraph_edge *e, bool report)
{
  bool inlinable = true;
  enum availability avail;
  struct cgraph_node *callee
    = cgraph_function_or_thunk_node (e->callee, &avail);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->symbol.decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->symbol.decl) : NULL;
  struct function *caller_cfun = DECL_STRUCT_FUNCTION (e->caller->symbol.decl);
  struct function *callee_cfun
    = callee ? DECL_STRUCT_FUNCTION (callee->symbol.decl) : NULL;

  if (!caller_cfun && e->caller->clone_of)
    caller_cfun = DECL_STRUCT_FUNCTION (e->caller->clone_of->symbol.decl);

  if (!callee_cfun && callee && callee->clone_of)
    callee_cfun = DECL_STRUCT_FUNCTION (callee->clone_of->symbol.decl);

  gcc_assert (e->inline_failed);

  if (!callee || !callee->analyzed)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (!inline_summary (callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  else if (avail <= AVAIL_OVERWRITABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  else if (e->call_stmt_cannot_inline_p)
    {
      e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
      inlinable = false;
    }
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->symbol.decl)
	   && DECL_FUNCTION_PERSONALITY (callee->symbol.decl)
	   && (DECL_FUNCTION_PERSONALITY (e->caller->symbol.decl)
	       != DECL_FUNCTION_PERSONALITY (callee->symbol.decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->symbol.decl)
	   && !is_tm_pure (e->caller->symbol.decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into cgraph node or mirror it in the inline summary.  */
  else if (callee_cfun && callee_cfun->can_throw_non_call_exceptions
	   && !(caller_cfun && caller_cfun->can_throw_non_call_exceptions))
    {
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->symbol.decl,
						callee->symbol.decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl)
	   && !lookup_attribute ("flatten",
				 DECL_ATTRIBUTES
				   (e->caller->global.inlined_to
				    ? e->caller->global.inlined_to->symbol.decl
				    : e->caller->symbol.decl))
	   && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just tip of iceberg of handling
     optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      struct cl_optimization *caller_opt
	= TREE_OPTIMIZATION ((caller_tree)
			     ? caller_tree
			     : optimization_default_node);

      struct cl_optimization *callee_opt
	= TREE_OPTIMIZATION ((callee_tree)
			     ? callee_tree
			     : optimization_default_node);

      if (((caller_opt->x_optimize > callee_opt->x_optimize)
	   || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
	  /* gcc.dg/pr43564.c.  Look at forced inline even in -O0.  */
	  && !DECL_DISREGARD_INLINE_LIMITS (e->callee->symbol.decl))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}
/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
							      NULL);
  /* Early inliner might get called at WPA stage when IPA pass adds new
     function.  In this case we can not really do any of early inlining
     because function bodies are missing.  */
  if (!gimple_has_body_p (callee->symbol.decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In early inliner some of callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->symbol.decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->symbol.decl)))
    {
      if (dump_file)
	fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true))
    return false;
  return true;
}
/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->symbol.decl))
      num++;
  return num;
}
/* Return true if we are interested in inlining the small function
   during early inlining.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      int n;

      if (growth <= 0)
	;
      else if (!cgraph_maybe_hot_edge_p (e)
	       && growth > 0)
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "call is cold and code would grow by %i\n",
		     xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
		     xstrdup (cgraph_node_name (callee)), callee->uid,
		     growth);
	  want_inline = false;
	}
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns\n",
		     xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
		     xstrdup (cgraph_node_name (callee)), callee->uid,
		     growth);
	  want_inline = false;
	}
      else if ((n = num_calls (callee)) != 0
	       && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns "
		     "divided by number of calls\n",
		     xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
		     xstrdup (cgraph_node_name (callee)), callee->uid,
		     growth);
	  want_inline = false;
	}
    }
  return want_inline;
}
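/* Worked example with illustrative numbers: with --param
   early-inlining-insns set to 11, a callee that grows the caller by 4
   instructions and itself contains two non-builtin calls is rejected,
   because 4 * (2 + 1) = 12 exceeds 11; the same callee with a single
   call passes, since 4 * 2 = 8 <= 11.  */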
/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

static inline gcov_type
compute_uninlined_call_time (struct inline_summary *callee_info,
			     struct cgraph_edge *edge)
{
  gcov_type uninlined_call_time =
    RDIV ((gcov_type)callee_info->time * MAX (edge->frequency, 1),
	  CGRAPH_FREQ_BASE);
  gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
					  ? edge->caller->global.inlined_to
					  : edge->caller)->time;
  return uninlined_call_time + caller_time;
}
/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

static inline gcov_type
compute_inlined_call_time (struct cgraph_edge *edge,
			   int edge_time)
{
  gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
					  ? edge->caller->global.inlined_to
					  : edge->caller)->time;
  gcov_type time = (caller_time
		    + RDIV (((gcov_type) edge_time
			     - inline_edge_summary (edge)->call_stmt_time)
			    * MAX (edge->frequency, 1), CGRAPH_FREQ_BASE));
  /* Possible one roundoff error, but watch for overflows.  */
  gcc_checking_assert (time >= INT_MIN / 2);
  if (time < 0)
    time = 0;
  return time;
}
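/* Worked example with illustrative numbers: with CGRAPH_FREQ_BASE == 1000,
   caller time 500, edge_time 30, call_stmt_time 5 and edge frequency 500
   (i.e. the call executes half the time), the inlined time is
   500 + RDIV ((30 - 5) * 500, 1000) = 513.  */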
/* Return true if the speedup for inlining E is bigger than
   PARAM_MAX_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  gcov_type time = compute_uninlined_call_time (inline_summary (e->callee),
						e);
  gcov_type inlined_time = compute_inlined_call_time (e,
						      estimate_edge_time (e));
  if (time - inlined_time
      > RDIV (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP), 100))
    return true;
  return false;
}
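/* Worked example with illustrative numbers: with --param inline-min-speedup
   set to 10, uninlined time 1000 and inlined time 880, the saving of 120
   exceeds RDIV (1000 * 10, 100) = 100, so the edge qualifies as a big
   speedup.  */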
/* Return true if we are interested in inlining the small function.
   When REPORT is true, report reason to dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      inline_hints hints = estimate_edge_hints (e);
      bool big_speedup = big_speedup_p (e);

      if (growth <= 0)
	;
      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
	 hints suggest that inlining the given function is very
	 profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->symbol.decl)
	       && growth >= MAX_INLINE_INSNS_SINGLE
	       && !big_speedup
	       && !(hints & (INLINE_HINT_indirect_call
			     | INLINE_HINT_loop_iterations
			     | INLINE_HINT_array_index
			     | INLINE_HINT_loop_stride)))
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  want_inline = false;
	}
      /* Before giving up based on the fact that caller size will grow, allow
	 functions that are called few times and where eliminating the offline
	 copy will lead to overall code size reduction.
	 Not all of these will be handled by subsequent inlining of functions
	 called once: in particular weak functions are not handled, nor are
	 functions that inline to multiple calls but whose bodies are mostly
	 optimized out.
	 Finally we want to inline earlier to allow inlining of callbacks.

	 This is slightly wrong on the aggressive side: it is entirely
	 possible that a function is called many times with a context where
	 inlining reduces code size and few times with a context where
	 inlining increases code size.  The resulting growth estimate will be
	 negative even if it would make more sense to keep the offline copy
	 and not inline into the call sites that make the code size grow.

	 When badness orders the calls in a way that code reducing calls come
	 first, this situation is not a problem at all: after inlining all
	 "good" calls, we will realize that keeping the function around is
	 better.  */
      else if (growth <= MAX_INLINE_INSNS_SINGLE
	       /* Unlike for functions called once, we play unsafe with
		  COMDATs.  We can allow that since we know functions
		  in consideration are small (and thus risk is small) and
		  moreover growth estimates already account that COMDAT
		  functions may or may not disappear when eliminated from
		  the current unit.  With good probability making the
		  aggressive choice in all units is going to make the
		  overall program smaller.

		  Consequently we ask cgraph_can_remove_if_no_direct_calls_p
		  instead of
		  cgraph_will_be_removed_from_program_if_no_direct_calls  */
	       && !DECL_EXTERNAL (callee->symbol.decl)
	       && cgraph_can_remove_if_no_direct_calls_p (callee)
	       && estimate_growth (callee) <= 0)
	;
      else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	       && !flag_inline_functions)
	{
	  e->inline_failed = CIF_NOT_DECLARED_INLINED;
	  want_inline = false;
	}
      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
	 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
	 inlining the given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	       && !big_speedup
	       && growth >= ((hints & (INLINE_HINT_indirect_call
				       | INLINE_HINT_loop_iterations
				       | INLINE_HINT_array_index
				       | INLINE_HINT_loop_stride))
			     ? MAX (MAX_INLINE_INSNS_AUTO,
				    MAX_INLINE_INSNS_SINGLE)
			     : MAX_INLINE_INSNS_AUTO))
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	  want_inline = false;
	}
      /* If call is cold, do not inline when function body would grow.  */
      else if (!cgraph_maybe_hot_edge_p (e))
	{
	  e->inline_failed = CIF_UNLIKELY_CALL;
	  want_inline = false;
	}
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}
/* EDGE is a self recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just introduce new recursive calls to appear.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->symbol.decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!cgraph_maybe_hot_edge_p (edge))
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (max_count && !outer_node->count)
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!want_inline)
    ;
  /* Inlining of a self recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
					 / max_depth);
      int i;
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
      if (max_count
	  && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
	      >= max_prob))
	{
	  reason = "profile of recursive call is too large";
	  want_inline = false;
	}
      if (!max_count
	  && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
	      >= max_prob))
	{
	  reason = "frequency of recursive call is too large";
	  want_inline = false;
	}
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     recursion depth is large.  We reduce function call overhead and increase
     chances that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame setup
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (max_count
	  && (edge->count * 100 / outer_node->count
	      <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "profile of recursive call is too small";
	  want_inline = false;
	}
      else if (!max_count
	       && (edge->frequency * 100 / caller_freq
		   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "frequency of recursive call is too small";
	  want_inline = false;
	}
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}
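/* Worked example with illustrative numbers: with max_depth == 8 and
   CGRAPH_FREQ_BASE == 1000, max_prob starts at 1000 - (1000 + 7) / 8 = 875.
   Peeling at depth 1 is therefore rejected once the recursive edge accounts
   for 87.5% or more of the caller frequency, and the threshold is squared
   for each additional recursion level.  */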
/* Return true when NODE has a caller other than EDGE.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_caller_edge (struct cgraph_node *node, void *edge)
{
  return (node->callers
	  && node->callers != edge);
}
/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  struct cgraph_node *function = cgraph_function_or_thunk_node (node, NULL);
  struct cgraph_edge *e;
  bool has_hot_call = false;

  /* Does it have callers?  */
  if (!node->callers)
    return false;
  /* Already inlined?  */
  if (function->global.inlined_to)
    return false;
  if (cgraph_function_or_thunk_node (node, NULL) != node)
    return false;
  /* Inlining into all callers would increase size?  */
  if (estimate_growth (node) > 0)
    return false;
  /* Maybe other aliases have more direct calls.  */
  if (cgraph_for_node_and_aliases (node, check_caller_edge, node->callers, true))
    return false;
  /* All inlines must be possible.  */
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!can_inline_edge_p (e, true))
	return false;
      if (!has_hot_call && cgraph_maybe_hot_edge_p (e))
	has_hot_call = true;
    }

  if (!cold && !has_hot_call)
    return false;
  return true;
}
#define RELATIVE_TIME_BENEFIT_RANGE (INT_MAX / 64)

/* Return relative time improvement for inlining EDGE in range
   1...RELATIVE_TIME_BENEFIT_RANGE.  */

static inline int
relative_time_benefit (struct inline_summary *callee_info,
		       struct cgraph_edge *edge,
		       int edge_time)
{
  gcov_type relbenefit;
  gcov_type uninlined_call_time = compute_uninlined_call_time (callee_info, edge);
  gcov_type inlined_call_time = compute_inlined_call_time (edge, edge_time);

  /* Inlining into an extern inline function is not a win.  */
  if (DECL_EXTERNAL (edge->caller->global.inlined_to
		     ? edge->caller->global.inlined_to->symbol.decl
		     : edge->caller->symbol.decl))
    return 1;

  /* Watch overflows.  */
  gcc_checking_assert (uninlined_call_time >= 0);
  gcc_checking_assert (inlined_call_time >= 0);
  gcc_checking_assert (uninlined_call_time >= inlined_call_time);

  /* Compute relative time benefit, i.e. how much the call becomes faster.
     ??? perhaps computing how much the caller+callee together become faster
     would lead to more realistic results.  */
  if (!uninlined_call_time)
    uninlined_call_time = 1;
  relbenefit =
    RDIV (((gcov_type)uninlined_call_time - inlined_call_time) * RELATIVE_TIME_BENEFIT_RANGE,
	  uninlined_call_time);
  relbenefit = MIN (relbenefit, RELATIVE_TIME_BENEFIT_RANGE);
  gcc_checking_assert (relbenefit >= 0);
  relbenefit = MAX (relbenefit, 1);
  return relbenefit;
}
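/* Worked example with illustrative numbers: if the uninlined call time is
   400 and the inlined time is 300, the relative benefit is
   (400 - 300) * RELATIVE_TIME_BENEFIT_RANGE / 400, i.e. a quarter of the
   range, clamped into 1 ... RELATIVE_TIME_BENEFIT_RANGE.  */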
/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable callers
   of the function or function body size.  */

static int
edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth, edge_time;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee,
							      NULL);
  struct inline_summary *callee_info = inline_summary (callee);
  inline_hints hints;

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    return INT_MIN;

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  gcc_checking_assert (edge_time <= callee_info->time);
  gcc_checking_assert (growth <= callee_info->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s/%i -> %s/%i\n",
	       xstrdup (cgraph_node_name (edge->caller)),
	       edge->caller->uid,
	       xstrdup (cgraph_node_name (callee)),
	       callee->uid);
      fprintf (dump_file, "      size growth %i, time %i ",
	       growth,
	       edge_time);
      dump_inline_hints (dump_file, hints);
      if (big_speedup_p (edge))
	fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN / 2 + growth;
      if (dump)
	fprintf (dump_file, "      %i: Growth %i <= 0\n", (int) badness,
		 growth);
    }

  /* When profiling is available, compute badness as:

		relative_edge_count * relative_time_benefit
     goodness = -------------------------------------------
		growth_of_caller
     badness = -goodness

    The fraction is upside down, because on edge counts and time benefits
    the bounds are known.  Edge growth is essentially unlimited.  */

  else if (max_count)
    {
      int relbenefit = relative_time_benefit (callee_info, edge, edge_time);
      badness =
	((int)
	 ((double) edge->count * INT_MIN / 2 / max_count / RELATIVE_TIME_BENEFIT_RANGE) *
	 relbenefit) / growth;

      /* Be sure that insanity of the profile won't lead to increasing counts
	 in the scaling and thus to overflow in the computation above.  */
      gcc_assert (max_count >= edge->count);
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i (relative %f): profile info. Relative count %f"
		   " * Relative benefit %f\n",
		   (int) badness, (double) badness / INT_MIN,
		   (double) edge->count / max_count,
		   relbenefit * 100.0 / RELATIVE_TIME_BENEFIT_RANGE);
	}
    }

  /* When function local profile is available.  Compute badness as:

		 relative_time_benefit
     goodness =  ---------------------------------
		 growth_of_caller * overall_growth

     badness = - goodness

     compensated by the inline hints.  */
  else if (flag_guess_branch_prob)
    {
      badness = (relative_time_benefit (callee_info, edge, edge_time)
		 * (INT_MIN / 16 / RELATIVE_TIME_BENEFIT_RANGE));
      badness /= (MIN (65536/2, growth) * MIN (65536/2, MAX (1, callee_info->growth)));
      gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 16);
      if ((hints & (INLINE_HINT_indirect_call
		    | INLINE_HINT_loop_iterations
		    | INLINE_HINT_array_index
		    | INLINE_HINT_loop_stride))
	  || callee_info->growth <= 0)
	badness *= 8;
      if (hints & (INLINE_HINT_same_scc))
	badness /= 16;
      else if (hints & (INLINE_HINT_in_scc))
	badness /= 8;
      else if (hints & (INLINE_HINT_cross_module))
	badness /= 2;
      gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 2);
      if ((hints & INLINE_HINT_declared_inline) && badness >= INT_MIN / 32)
	badness *= 16;
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i: guessed profile. frequency %f,"
		   " benefit %f%%, time w/o inlining %i, time w inlining %i"
		   " overall growth %i (current) %i (original)\n",
		   (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE,
		   relative_time_benefit (callee_info, edge, edge_time) * 100.0
		   / RELATIVE_TIME_BENEFIT_RANGE,
		   (int)compute_uninlined_call_time (callee_info, edge),
		   (int)compute_inlined_call_time (edge, edge_time),
		   estimate_growth (callee),
		   callee_info->growth);
	}
    }
  /* When function local profile is not available or it does not give
     useful information (ie frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = growth * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness >>= nest;
      else
	badness <<= nest;
      if (dump)
	fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
		 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
    return badness + 1;
  else
    return badness;
}
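/* Note on the ranges above: the first three cost models yield non-positive
   badness (size-saving edges land near INT_MIN / 2, the profile-driven
   formulas scale into INT_MIN / 2 ... 0), while the fallback without any
   profile information uses scaled growth and may be positive.  Edges with
   the smallest key are extracted from the fibheap first.  */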
/* Recompute badness of EDGE and update its key in HEAP if needed.  */
static void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key only decreases the keys.
	 When we increase the key we do not update heap
	 and instead re-insert the element once it becomes
	 a minimum of heap.  */
      if (badness < n->key)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "  decreasing badness %s/%i -> %s/%i, %i to %i\n",
		       xstrdup (cgraph_node_name (edge->caller)),
		       edge->caller->uid,
		       xstrdup (cgraph_node_name (edge->callee)),
		       edge->callee->uid,
		       (int)n->key,
		       badness);
	    }
	  fibheap_replace_key (heap, n, badness);
	  gcc_checking_assert (n->key == badness);
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "  enqueuing call %s/%i -> %s/%i, badness %i\n",
		   xstrdup (cgraph_node_name (edge->caller)),
		   edge->caller->uid,
		   xstrdup (cgraph_node_name (edge->callee)),
		   edge->callee->uid,
		   badness);
	}
      edge->aux = fibheap_insert (heap, badness, edge);
    }
}
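/* Example of the lazy update scheme: if an edge is queued with key -1000
   and its recomputed badness worsens to -500, the heap is left untouched
   here; inline_small_functions notices the stale key when the edge reaches
   the heap minimum and re-inserts it with the up-to-date value.  */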
/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need reset
   because better context may be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  int i;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  /* WHERE body size has changed, the cached growth is invalid.  */
  reset_node_growth_cache (where);

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);
  for (i = 0; ipa_ref_list_referring_iterate (&where->symbol.ref_list,
					      i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      reset_edge_caches (ipa_ref_referring_node (ref));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (e->inline_failed)
	  reset_edge_growth_cache (e);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES track nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
   it is inlinable.  Otherwise check all edges.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes,
		    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  int i;
  struct ipa_ref *ref;

  if ((!node->alias && !inline_summary (node)->inlinable)
      || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;

  for (i = 0; ipa_ref_list_referring_iterate (&node->symbol.ref_list,
					      i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      {
	struct cgraph_node *alias = ipa_ref_referring_node (ref);
	update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
      }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	if (!check_inlinablity_for
	    || check_inlinablity_for == edge)
	  {
	    if (can_inline_edge_p (edge, false)
		&& want_inline_small_function_p (edge, false))
	      update_edge_key (heap, edge);
	    else if (edge->aux)
	      {
		report_inline_failed_reason (edge);
		fibheap_delete_node (heap, (fibnode_t) edge->aux);
		edge->aux = NULL;
	      }
	  }
	else if (edge->aux)
	  update_edge_key (heap, edge);
      }
}
/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced new call site) and thus all we need is to insert newly
   created edges into the heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	enum availability avail;
	struct cgraph_node *callee;
	/* We do not reset callee growth cache here.  Since we added a new
	   call, growth could have just increased and consequently the
	   badness metric doesn't need updating.  */
	if (e->inline_failed
	    && (callee = cgraph_function_or_thunk_node (e->callee, &avail))
	    && inline_summary (callee)->inlinable
	    && cgraph_function_body_availability (callee) >= AVAIL_AVAILABLE
	    && !bitmap_bit_p (updated_nodes, callee->uid))
	  {
	    if (can_inline_edge_p (e, false)
		&& want_inline_small_function_p (e, false))
	      update_edge_key (heap, e);
	    else if (e->aux)
	      {
		report_inline_failed_reason (e);
		fibheap_delete_node (heap, (fibnode_t) e->aux);
		e->aux = NULL;
	      }
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			fibheap_t heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
	|| (cgraph_function_or_thunk_node (e->callee, &avail) == node
	    && avail > AVAIL_OVERWRITABLE))
      {
	/* When profile feedback is available, prioritize by expected number
	   of calls.  */
	fibheap_insert (heap,
			!max_count ? -e->frequency
			: -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
			e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
		    vec<cgraph_edge_p> *new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  fibheap_t heap;
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->symbol.decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that function is small enough to be considered for inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     cgraph_node_name (node));

  /* Do the inlining and update list of recursive calls during process.  */
  while (!fibheap_empty (heap))
    {
      struct cgraph_edge *curr
	= (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode, *dest = curr->callee;

      if (!can_inline_edge_p (curr, true))
	continue;

      /* MASTER_CLONE is produced in the case we already started modifying
	 the function.  Be sure to redirect the edge to the original body
	 before estimating growths, otherwise we will be seeing growths
	 after inlining the already modified body.  */
      if (master_clone)
	{
	  cgraph_redirect_edge_callee (curr, master_clone);
	  reset_edge_growth_cache (curr);
	}

      if (estimate_size_after_inlining (node, curr) > limit)
	{
	  cgraph_redirect_edge_callee (curr, dest);
	  reset_edge_growth_cache (curr);
	  break;
	}

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->symbol.decl
	    == cgraph_function_or_thunk_node (curr->callee, NULL)->symbol.decl)
	  depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
	{
	  cgraph_redirect_edge_callee (curr, dest);
	  reset_edge_growth_cache (curr);
	  continue;
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count)
	    {
	      fprintf (dump_file, " called approx. %.2f times per call",
		       (double)curr->count / node->count);
	    }
	  fprintf (dump_file, "\n");
	}
      if (!master_clone)
	{
	  /* We need original clone to copy around.  */
	  master_clone = cgraph_clone_node (node, node->symbol.decl,
					    node->count, CGRAPH_FREQ_BASE,
					    false, vec<cgraph_edge_p>(),
					    true);
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL);
	  cgraph_redirect_edge_callee (curr, master_clone);
	  reset_edge_growth_cache (curr);
	}

      inline_call (curr, false, new_edges, &overall_size, true);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }

  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");
  fibheap_delete (heap);

  if (!master_clone)
    return false;

  if (dump_file)
    fprintf (dump_file,
	     "\n   Inlined %i times, "
	     "body grown from size %i to %i, time %i to %i\n", n,
	     inline_summary (master_clone)->size, inline_summary (node)->size,
	     inline_summary (master_clone)->time, inline_summary (node)->time);

  /* Remove master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone so we don't need recursion.  */
  for (node = cgraph_first_function (); node != master_clone;
       node = next)
    {
      next = cgraph_next_function (node);
      if (node->global.inlined_to == master_clone)
	cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  return true;
}
/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
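/* Worked example with illustrative numbers: with --param inline-unit-growth
   set to 30 and a unit of 10000 instructions (already above --param
   large-unit-insns), inlining may grow the unit up to
   10000 * (100 + 30) / 100 = 13000 instructions.  */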
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (fibheap_t heap, vec<cgraph_edge_p> new_edges)
{
  while (new_edges.length () > 0)
    {
      struct cgraph_edge *edge = new_edges.pop ();

      gcc_assert (!edge->aux);
      if (edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true))
	edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
    }
}
/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap ordered in
   increasing badness.

   The inlining of small functions is bounded by unit growth parameters.  */

static void
inline_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  fibheap_t edge_heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  vec<cgraph_edge_p> new_indirect_edges = vec<cgraph_edge_p>();
  int initial_size = 0;
  struct cgraph_node **order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);

  if (flag_indirect_inlining)
    new_indirect_edges.create (8);

  /* Compute overall unit size and other global parameters used by badness
     metrics.  */

  max_count = 0;
  ipa_reduced_postorder (order, true, true, NULL);
  free (order);

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (cgraph_function_with_gimple_body_p (node)
	    || node->thunk.thunk_p)
	  {
	    struct inline_summary *info = inline_summary (node);
	    struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->symbol.aux;

	    if (!DECL_EXTERNAL (node->symbol.decl))
	      initial_size += info->size;
	    info->growth = estimate_growth (node);
	    if (dfs && dfs->next_cycle)
	      {
		struct cgraph_node *n2;
		int id = dfs->scc_no + 1;
		for (n2 = node; n2;
		     n2 = ((struct ipa_dfs_info *) node->symbol.aux)->next_cycle)
		  {
		    struct inline_summary *info2 = inline_summary (n2);
		    if (info2->scc_no)
		      break;
		    info2->scc_no = id;
		  }
	      }
	  }

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (max_count < edge->count)
	    max_count = edge->count;
      }
  ipa_free_postorder_info ();
  initialize_growth_caches ();

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining of small functions.  Starting with size %i.\n",
	     initial_size);

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  /* Populate the heap with all edges we might inline.  */

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (dump_file)
	  fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
		   cgraph_node_name (node), node->uid);

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (edge->inline_failed
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true)
	      && edge->inline_failed)
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (edge_heap, edge);
	    }
      }

  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));

  while (!fibheap_empty (edge_heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (edge_heap);
      int current_badness;
      int cached_badness;
      int growth;

      edge = (struct cgraph_edge *) fibheap_extract_min (edge_heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;

      /* Be sure that caches are maintained consistent.
	 We can not make this ENABLE_CHECKING only because it causes
	 different updates of the fibheap queue.  */
      cached_badness = edge_badness (edge, false);
      reset_edge_growth_cache (edge);
      reset_node_growth_cache (edge->callee);

      /* When updating the edge costs, we only decrease badness in the keys.
	 Increases of badness are handled lazily; when we see a key with an
	 out of date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      gcc_assert (cached_badness == current_badness);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
	{
	  edge->aux = fibheap_insert (edge_heap, current_badness, edge);
	  continue;
	}

      if (!can_inline_edge_p (edge, true))
	continue;

      callee = cgraph_function_or_thunk_node (edge->callee, NULL);
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s with %i size\n",
		   cgraph_node_name (callee),
		   inline_summary (callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s in %s:%i\n"
		   " Estimated growth after inlined into all is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1
		   : gimple_lineno ((const_gimple) edge->call_stmt),
		   estimate_growth (callee),
		   badness,
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}

      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  continue;
	}

      if (!want_inline_small_function_p (edge, true))
	continue;

      /* The heuristics for inlining small functions work poorly for
	 recursive calls where we do an effect similar to loop unrolling.
	 When inlining such an edge seems profitable, leave the decision on
	 the specific inliner.  */
      if (cgraph_edge_recursive_p (edge))
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   flag_indirect_inlining
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      continue;
	    }
	  reset_edge_caches (where);
	  /* Recursive inliner inlines all recursive calls of the function
	     at once.  Consequently we need to update all callee keys.  */
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (edge_heap, new_indirect_edges);
	  update_callee_keys (edge_heap, where, updated_nodes);
	}
      else
	{
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where self recursive function A is inlined
	     into B.  This is a desired optimization in some cases, since it
	     leads to an effect similar to loop peeling and we might
	     completely optimize out the recursive call.  However we must be
	     extra selective.  */

	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->symbol.decl == callee->symbol.decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	    }
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
						     true, depth))
	    {
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->symbol.decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      continue;
	    }
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);

	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size, true);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (edge_heap, new_indirect_edges);

	  reset_edge_caches (edge->callee);
	  reset_node_growth_cache (callee);

	  update_callee_keys (edge_heap, where, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as number of inlinable calls and size of the function body.
	 After inlining these properties might change for the function we
	 inlined into (since its body size changed) and for the functions
	 called by the function we inlined (since the number of their
	 inlinable callers might change).  */
      update_caller_keys (edge_heap, where, updated_nodes, NULL);
      bitmap_clear (updated_nodes);

      if (dump_file)
	{
	  fprintf (dump_file,
		   " Inlined into %s which now has time %i and size %i,"
		   "net change of %+i.\n",
		   cgraph_node_name (edge->caller),
		   inline_summary (edge->caller)->time,
		   inline_summary (edge->caller)->size,
		   overall_size - old_size);
	}
      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }

  free_growth_caches ();
  new_indirect_edges.release ();
  fibheap_delete (edge_heap);
  if (dump_file)
    fprintf (dump_file,
	     "Unit growth for small function inlining: %i->%i (%i%%)\n",
	     initial_size, overall_size,
	     initial_size ? overall_size * 100 / (initial_size) - 100: 0);
  BITMAP_FREE (updated_nodes);
}
/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

static void
flatten_function (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->symbol.aux == NULL);

  node->symbol.aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

      /* We've hit a cycle?  It is time to give up.  */
      if (callee->symbol.aux)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining %s into %s to avoid cycle.\n",
		     xstrdup (cgraph_node_name (callee)),
		     xstrdup (cgraph_node_name (e->caller)));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  flatten_function (callee, early);
	  continue;
	}

      /* Flatten attribute needs to be processed during late inlining.  For
	 extra code quality we however do flattening during early
	 optimization, too.  */
      if (!early
	  ? !can_inline_edge_p (e, true)
	  : !can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: recursive call.\n");
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->symbol.decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->symbol.decl)))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup (cgraph_node_name (callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      orig_callee = callee;
      inline_call (e, true, NULL, NULL, false);
      if (e->callee != orig_callee)
	orig_callee->symbol.aux = (void *) node;
      flatten_function (e->callee, early);
      if (e->callee != orig_callee)
	orig_callee->symbol.aux = NULL;
    }

  node->symbol.aux = NULL;
  if (!node->global.inlined_to)
    inline_update_overall_summary (node);
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int i;

  if (in_lto_p && optimize)
    ipa_update_after_lto_read ();

  if (dump_file)
    dump_inline_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);

  FOR_EACH_FUNCTION (node)
    node->symbol.aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened.
	 Ideally when processing callees we stop inlining at the
	 entry of cycles, possibly cloning that entry point and
	 try to flatten itself turning it into a self-recursive
	 function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->symbol.decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", cgraph_node_name (node));
	  flatten_function (node, false);
	}
    }

  inline_small_functions ();
  symtab_remove_unreachable_nodes (true, dump_file);
  free (order);

  /* Inline functions with a property that after inlining into all callers
     the code size will shrink because the out-of-line copy is eliminated.
     We do this regardless of the callee size as long as function growth
     limits are met.  */
  if (flag_inline_functions_called_once)
    {
      int cold;
      if (dump_file)
	fprintf (dump_file,
		 "\nDeciding on functions to be inlined into all callers:\n");

      /* Inlining one function called once has a good chance of preventing
	 inlining another function into the same callee.  Ideally we should
	 work in priority order, but probably inlining hot functions first
	 is a good cut without the extra pain of maintaining the queue.

	 ??? this is not really fitting the bill perfectly: inlining a
	 function into a callee often leads to better optimization of the
	 callee due to increased context for optimization.
	 For example if main() calls a function that outputs help
	 and then a function that does the main optimization, we should
	 inline the second with priority even if both calls are cold by
	 themselves.

	 We probably want to implement a new predicate replacing our use of
	 maybe_hot_edge interpreted as maybe_hot_edge || callee is known
	 to be hot.  */
      for (cold = 0; cold <= 1; cold++)
	{
	  FOR_EACH_DEFINED_FUNCTION (node)
	    {
	      if (want_inline_function_to_all_callers_p (node, cold))
		{
		  int num_calls = 0;
		  struct cgraph_edge *e;
		  for (e = node->callers; e; e = e->next_caller)
		    num_calls++;
		  while (node->callers && !node->global.inlined_to)
		    {
		      struct cgraph_node *caller = node->callers->caller;

		      if (dump_file)
			{
			  fprintf (dump_file,
				   "\nInlining %s size %i.\n",
				   cgraph_node_name (node),
				   inline_summary (node)->size);
			  fprintf (dump_file,
				   " Called once from %s %i insns.\n",
				   cgraph_node_name (node->callers->caller),
				   inline_summary (node->callers->caller)->size);
			}

		      inline_call (node->callers, true, NULL, NULL, true);
		      if (dump_file)
			fprintf (dump_file,
				 " Inlined into %s which now has %i size\n",
				 cgraph_node_name (caller),
				 inline_summary (caller)->size);
		      if (!num_calls--)
			{
			  if (dump_file)
			    fprintf (dump_file, "New calls found; giving up.\n");
			  break;
			}
		    }
		}
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (optimize)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
	     "\nInlined %i calls, eliminated %i functions\n\n",
	     ncalls_inlined, nfunctions_inlined);

  if (dump_file)
    dump_inline_summaries (dump_file);
  /* In WPA we use inline summaries for partitioning process.  */
  if (!flag_wpa)
    inline_free_summary ();
  return 0;
}
/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
      if (!DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining recursive call to %s.\n",
		     cgraph_node_name (e->callee));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      if (!can_early_inline_edge_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
		 xstrdup (cgraph_node_name (e->callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }
  if (inlined)
    inline_update_overall_summary (node);

  return inlined;
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
      if (!inline_summary (callee)->inlinable
	  || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	  && !flag_inline_small_functions
	  && !flag_inline_functions)
	continue;

      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 cgraph_node_name (callee));

      if (!can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup (cgraph_node_name (callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      inline_call (e, true, NULL, NULL, true);
      inlined = true;
    }

  return inlined;
}
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   later real inlining pass for testcases with very many function calls.  */

static unsigned int
early_inliner (void)
{
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if datastructures for ipa-inliner are already computed.  This
     happens when some pass decides to construct new function and
     cgraph_add_new_function calls lowering passes and early optimization on
     it.  This may confuse ourself when early inliner decide to inline call to
     function clone, because function clones don't have parameter list in
     ipa-prop matching their signature.  */
  if (ipa_node_params_vector.exists ())
    return 0;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  /* Even when not optimizing or not inlining inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks as functions calling
	 always inline functions will get less optimized, but at the
	 same time inlining of functions calling always inline
	 function into an always inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of inlining.  */
      || DECL_DISREGARD_INLINE_LIMITS (node->symbol.decl))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->symbol.decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_file)
	fprintf (dump_file,
		 "Flattening %s\n", cgraph_node_name (node));
      flatten_function (node, true);
      inlined = true;
    }
  else
    {
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of early inliner works as expected.  We however have
	     values approximately right and thus we only need to update edge
	     info that might be cleared out for newly discovered edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      struct inline_edge_summary *es = inline_edge_summary (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	      if (edge->callee->symbol.decl
		  && !gimple_check_call_matching_types (edge->call_stmt,
							edge->callee->symbol.decl))
		edge->call_stmt_cannot_inline_p = true;
	    }
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}
struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  OPTGROUP_INLINE,			/* optinfo_flags */
  NULL,					/* gate */
  early_inliner,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_EARLY_INLINING,			/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.

   Enable inlining unconditionally at -flto.  We need size estimates to
   drive partitioning.  */

static bool
gate_ipa_inline (void)
{
  return optimize || flag_lto || flag_wpa;
}
struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",				/* name */
  OPTGROUP_INLINE,			/* optinfo_flags */
  gate_ipa_inline,			/* gate */
  ipa_inline,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_IPA_INLINING,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_symtab
  | TODO_remove_functions | TODO_ggc_collect	/* todo_flags_finish */
 },
 inline_generate_summary,		/* generate_summary */
 inline_write_summary,			/* write_summary */
 inline_read_summary,			/* read_summary */
 NULL,					/* write_optimization_summary */
 NULL,					/* read_optimization_summary */
 NULL,					/* stmt_fixup */
 0,					/* TODOs */
 inline_transform,			/* function_transform */
 NULL					/* variable_transform */
};