/* Inlining decision heuristics.
   Copyright (C) 2003-2013 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, growth
     and so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   The inliner itself is split into two passes:

   pass_early_inlining

     A simple local inlining pass that inlines callees into the current
     function.  This pass makes no use of whole unit analysis and thus it
     can make only very simple decisions based on local properties.

     The strength of the pass is that it is run in topological order
     (reverse postorder) on the callgraph.  Functions are converted into SSA
     form just before this pass and optimized subsequently.  As a result, the
     callees of the function seen by the early inliner have already been
     optimized, and the results of early inlining add a lot of optimization
     opportunities for the local optimization.

     The pass handles the obvious inlining decisions within the compilation
     unit - inlining auto inline functions, inlining for size and
     flattening.

     The main strength of the pass is the ability to eliminate abstraction
     penalty in C++ code (via a combination of inlining and early
     optimization) and thus improve the quality of analysis done by the real
     IPA optimizers.

     Because of lack of whole unit knowledge, the pass cannot really make
     good code size/performance tradeoffs.  It however does very simple
     speculative inlining allowing code size to grow by
     EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
     the optimizations performed later are very likely to eliminate the cost.

   pass_ipa_inline

     This is the real inliner able to handle inlining with whole program
     knowledge.  It performs the following steps:

     1) Inlining of small functions.  This is implemented by a greedy
     algorithm ordering all inlinable cgraph edges by their badness and
     inlining them in this order as long as the inline limits allow doing so.

     This heuristic is not very good at inlining recursive calls.  Recursive
     calls can be inlined with results similar to loop unrolling.  To do so,
     a special purpose recursive inliner is executed on a function when a
     recursive edge is met as a viable candidate.

     2) Unreachable functions are removed from the callgraph.  Inlining leads
     to devirtualization and other modification of the callgraph, so functions
     may become unreachable during the process.  Also functions declared as
     extern inline or virtual functions are removed, since after inlining
     we no longer need the offline bodies.

     3) Functions called once and not exported from the unit are inlined.
     This should almost always lead to a reduction of code size by eliminating
     the need for an offline copy of the function.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "coverage.h"
#include "ggc.h"
#include "rtl.h"
#include "tree-flow.h"
#include "ipa-prop.h"
#include "except.h"
#include "target.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
/* Statistics we collect about inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into a tiny wrapper, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = cgraph_function_or_thunk_node (e->callee, NULL);
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  struct inline_summary *info, *what_info, *outer_info = inline_summary (to);

  /* Look for the function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      info = inline_summary (to);
      if (limit < info->self_size)
	limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
	stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
	to = to->callers->caller;
      else
	break;
    }

  what_info = inline_summary (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
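
  /* For example, with the usual default of --param large-function-growth=100
     this doubles the limit: a 1000-insn outer function may grow up to 2000
     insns before the check below fires, and even then only when the result
     also exceeds PARAM_LARGE_FUNCTION_INSNS.  */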
  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large I/O data structures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
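
  /* The percentage here is large by design; with the usual default of
     --param large-stack-frame-growth=1000 the limit becomes 11x the largest
     self stack size seen above, e.g. a 256-byte frame admits roughly 2816
     bytes of combined inlined stack before the check below can fire.  */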
  inlined_stack = (outer_info->stack_frame_offset
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
	     xstrdup (cgraph_node_name (e->caller)), e->caller->symbol.order,
	     xstrdup (cgraph_node_name (e->callee)), e->callee->symbol.order,
	     cgraph_inline_failed_string (e->inline_failed));
}
/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.  */
static bool
can_inline_edge_p (struct cgraph_edge *e, bool report)
{
  bool inlinable = true;
  enum availability avail;
  struct cgraph_node *callee
    = cgraph_function_or_thunk_node (e->callee, &avail);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->symbol.decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->symbol.decl) : NULL;
  struct function *caller_cfun = DECL_STRUCT_FUNCTION (e->caller->symbol.decl);
  struct function *callee_cfun
    = callee ? DECL_STRUCT_FUNCTION (callee->symbol.decl) : NULL;

  if (!caller_cfun && e->caller->clone_of)
    caller_cfun = DECL_STRUCT_FUNCTION (e->caller->clone_of->symbol.decl);

  if (!callee_cfun && callee && callee->clone_of)
    callee_cfun = DECL_STRUCT_FUNCTION (callee->clone_of->symbol.decl);

  gcc_assert (e->inline_failed);

  if (!callee || !callee->symbol.definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (!inline_summary (callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  else if (avail <= AVAIL_OVERWRITABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  else if (e->call_stmt_cannot_inline_p)
    {
      e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
      inlinable = false;
    }
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->symbol.decl)
	   && DECL_FUNCTION_PERSONALITY (callee->symbol.decl)
	   && (DECL_FUNCTION_PERSONALITY (e->caller->symbol.decl)
	       != DECL_FUNCTION_PERSONALITY (callee->symbol.decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->symbol.decl)
	   && !is_tm_pure (e->caller->symbol.decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into cgraph node or mirror it in the inline summary.  */
  else if (callee_cfun && callee_cfun->can_throw_non_call_exceptions
	   && !(caller_cfun && caller_cfun->can_throw_non_call_exceptions))
    {
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->symbol.decl,
						callee->symbol.decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl)
	   && !lookup_attribute ("flatten",
				 DECL_ATTRIBUTES
				   (e->caller->global.inlined_to
				    ? e->caller->global.inlined_to->symbol.decl
				    : e->caller->symbol.decl))
	   && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      struct cl_optimization *caller_opt
	= TREE_OPTIMIZATION ((caller_tree)
			     ? caller_tree
			     : optimization_default_node);
      struct cl_optimization *callee_opt
	= TREE_OPTIMIZATION ((callee_tree)
			     ? callee_tree
			     : optimization_default_node);

      if (((caller_opt->x_optimize > callee_opt->x_optimize)
	   || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
	  /* gcc.dg/pr43564.c.  Look at forced inline even in -O0.  */
	  && !DECL_DISREGARD_INLINE_LIMITS (e->callee->symbol.decl))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}
/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
							      NULL);
  /* The early inliner might get called at WPA stage when an IPA pass adds a
     new function.  In this case we cannot really do any of early inlining
     because function bodies are missing.  */
  if (!gimple_has_body_p (callee->symbol.decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by the early inliner, yet).  We don't have a CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->symbol.decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->symbol.decl)))
    {
      if (dump_file)
	fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true))
    return false;
  return true;
}
/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->symbol.decl))
      num++;
  return num;
}
/* Return true if we are interested in early inlining the small function
   called by edge E.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      int n;

      if (growth <= 0)
	;
      else if (!cgraph_maybe_hot_edge_p (e)
	       && growth > 0)
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "call is cold and code would grow by %i\n",
		     xstrdup (cgraph_node_name (e->caller)),
		     e->caller->symbol.order,
		     xstrdup (cgraph_node_name (callee)), callee->symbol.order,
		     growth);
	  want_inline = false;
	}
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns\n",
		     xstrdup (cgraph_node_name (e->caller)),
		     e->caller->symbol.order,
		     xstrdup (cgraph_node_name (callee)), callee->symbol.order,
		     growth);
	  want_inline = false;
	}
      else if ((n = num_calls (callee)) != 0
	       && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns "
		     "divided by number of calls\n",
		     xstrdup (cgraph_node_name (e->caller)),
		     e->caller->symbol.order,
		     xstrdup (cgraph_node_name (callee)), callee->symbol.order,
		     growth);
	  want_inline = false;
	}
    }
  return want_inline;
}
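
/* As a worked example of the last check above: if PARAM_EARLY_INLINING_INSNS
   were 10, a callee with estimated growth 4 that itself still contains 2
   non-builtin calls would be rejected, since 4 * (2 + 1) = 12 > 10.  The
   point is to be more conservative when inlining a function that exposes
   yet more call sites to subsequent iterations.  */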
/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

static inline gcov_type
compute_uninlined_call_time (struct inline_summary *callee_info,
			     struct cgraph_edge *edge)
{
  gcov_type uninlined_call_time =
    RDIV ((gcov_type)callee_info->time * MAX (edge->frequency, 1),
	  CGRAPH_FREQ_BASE);
  gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
					  ? edge->caller->global.inlined_to
					  : edge->caller)->time;
  return uninlined_call_time + caller_time;
}
/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

static inline gcov_type
compute_inlined_call_time (struct cgraph_edge *edge,
			   int edge_time)
{
  gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
					  ? edge->caller->global.inlined_to
					  : edge->caller)->time;
  gcov_type time = (caller_time
		    + RDIV (((gcov_type) edge_time
			     - inline_edge_summary (edge)->call_stmt_time)
			    * MAX (edge->frequency, 1), CGRAPH_FREQ_BASE));
  /* Possibly one roundoff error, but watch for overflows.  */
  gcc_checking_assert (time >= INT_MIN / 2);
  return time;
}
/* Return true if the speedup for inlining E is bigger than
   PARAM_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  gcov_type time = compute_uninlined_call_time (inline_summary (e->callee),
						e);
  gcov_type inlined_time = compute_inlined_call_time (e,
						      estimate_edge_time (e));
  if (time - inlined_time
      > RDIV (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP), 100))
    return true;
  return false;
}
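
/* E.g. with the usual default of --param inline-min-speedup=10 this requires
   the estimated caller+callee time to drop by at least 10% before the
   "big speedup" hint overrides the size limits below.  */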
/* Return true if we are interested in inlining the small function
   called by edge E.
   When REPORT is true, report the reason to the dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      inline_hints hints = estimate_edge_hints (e);
      bool big_speedup = big_speedup_p (e);

      if (growth <= 0)
	;
      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
	 the hints suggest that inlining the given function is very
	 profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->symbol.decl)
	       && growth >= MAX_INLINE_INSNS_SINGLE
	       && !big_speedup
	       && !(hints & (INLINE_HINT_indirect_call
			     | INLINE_HINT_loop_iterations
			     | INLINE_HINT_array_index
			     | INLINE_HINT_loop_stride)))
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  want_inline = false;
	}
      /* Before giving up based on the fact that caller size will grow, allow
	 functions that are called few times and where eliminating the offline
	 copy will lead to overall code size reduction.
	 Not all of these will be handled by subsequent inlining of functions
	 called once: in particular weak functions are not handled, nor are
	 functions that inline to multiple calls but whose bodies are largely
	 optimized out.
	 Finally we want to inline earlier to allow inlining of callbacks.

	 This is slightly wrong on the aggressive side: it is entirely
	 possible that a function is called many times with a context where
	 inlining reduces code size and few times with a context where
	 inlining increases code size.  The resulting growth estimate will be
	 negative even if it would make more sense to keep the offline copy
	 and not inline into the call sites that make the code size grow.

	 When badness orders the calls in a way that code reducing calls come
	 first, this situation is not a problem at all: after inlining all
	 "good" calls, we will realize that keeping the function around is
	 better.  */
      else if (growth <= MAX_INLINE_INSNS_SINGLE
	       /* Unlike for functions called once, we play unsafe with
		  COMDATs.  We can allow that since we know functions
		  in consideration are small (and thus the risk is small) and
		  moreover the growth estimates already account for the fact
		  that COMDAT functions may or may not disappear when
		  eliminated from the current unit.  With good probability
		  making the aggressive choice in all units is going to make
		  the overall program smaller.

		  Consequently we ask cgraph_can_remove_if_no_direct_calls_p
		  instead of
		  cgraph_will_be_removed_from_program_if_no_direct_calls  */
	       && !DECL_EXTERNAL (callee->symbol.decl)
	       && cgraph_can_remove_if_no_direct_calls_p (callee)
	       && estimate_growth (callee) <= 0)
	;
      else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	       && !flag_inline_functions)
	{
	  e->inline_failed = CIF_NOT_DECLARED_INLINED;
	  want_inline = false;
	}
      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
	 Upgrade it to MAX_INLINE_INSNS_SINGLE when the hints suggest that
	 inlining the given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	       && !big_speedup
	       && growth >= ((hints & (INLINE_HINT_indirect_call
				       | INLINE_HINT_loop_iterations
				       | INLINE_HINT_array_index
				       | INLINE_HINT_loop_stride))
			     ? MAX (MAX_INLINE_INSNS_AUTO,
				    MAX_INLINE_INSNS_SINGLE)
			     : MAX_INLINE_INSNS_AUTO))
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	  want_inline = false;
	}
      /* If call is cold, do not inline when function body would grow.  */
      else if (!cgraph_maybe_hot_edge_p (e))
	{
	  e->inline_failed = CIF_UNLIKELY_CALL;
	  want_inline = false;
	}
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}
/* EDGE is a self recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just introduce new recursive calls to appear.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->symbol.decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!cgraph_maybe_hot_edge_p (edge))
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (max_count && !outer_node->count)
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!want_inline)
    ;
  /* Inlining of a self recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self recursive function very small.
     Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
					 / max_depth);
      int i;
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
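
      /* With CGRAPH_FREQ_BASE of 1000 and the usual max_depth default of 8,
	 max_prob starts at 1000 - 125 = 875, i.e. the recursion probability
	 must stay below 87.5%; each extra depth squares the bound
	 (875 * 875 / 1000 = 765, then 585, ...).  */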
      if (max_count
	  && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
	      >= max_prob))
	{
	  reason = "profile of recursive call is too large";
	  want_inline = false;
	}
      if (!max_count
	  && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
	      >= max_prob))
	{
	  reason = "frequency of recursive call is too large";
	  want_inline = false;
	}
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     recursion depth is large.  We reduce function call overhead and increase
     chances that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame setup,
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (max_count
	  && (edge->count * 100 / outer_node->count
	      <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "profile of recursive call is too small";
	  want_inline = false;
	}
      else if (!max_count
	       && (edge->frequency * 100 / caller_freq
		   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "frequency of recursive call is too small";
	  want_inline = false;
	}
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}
/* Return true when NODE has a caller other than EDGE.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_caller_edge (struct cgraph_node *node, void *edge)
{
  return (node->callers
	  && node->callers != edge);
}
/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  struct cgraph_node *function = cgraph_function_or_thunk_node (node, NULL);
  struct cgraph_edge *e;
  bool has_hot_call = false;

  /* Does it have callers?  */
  if (!node->callers)
    return false;
  /* Already inlined?  */
  if (function->global.inlined_to)
    return false;
  if (cgraph_function_or_thunk_node (node, NULL) != node)
    return false;
  /* Inlining into all callers would increase size?  */
  if (estimate_growth (node) > 0)
    return false;
  /* Maybe other aliases have more direct calls.  */
  if (cgraph_for_node_and_aliases (node, check_caller_edge, node->callers, true))
    return false;
  /* All inlines must be possible.  */
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!can_inline_edge_p (e, true))
	return false;
      if (!has_hot_call && cgraph_maybe_hot_edge_p (e))
	has_hot_call = true;
    }

  if (!cold && !has_hot_call)
    return false;
  return true;
}
#define RELATIVE_TIME_BENEFIT_RANGE (INT_MAX / 64)

/* Return relative time improvement for inlining EDGE in range
   1...RELATIVE_TIME_BENEFIT_RANGE.  */

static int
relative_time_benefit (struct inline_summary *callee_info,
		       struct cgraph_edge *edge,
		       int edge_time)
{
  gcov_type relbenefit;
  gcov_type uninlined_call_time = compute_uninlined_call_time (callee_info, edge);
  gcov_type inlined_call_time = compute_inlined_call_time (edge, edge_time);

  /* Inlining into an extern inline function is not a win.  */
  if (DECL_EXTERNAL (edge->caller->global.inlined_to
		     ? edge->caller->global.inlined_to->symbol.decl
		     : edge->caller->symbol.decl))
    return 1;

  /* Watch overflows.  */
  gcc_checking_assert (uninlined_call_time >= 0);
  gcc_checking_assert (inlined_call_time >= 0);
  gcc_checking_assert (uninlined_call_time >= inlined_call_time);

  /* Compute relative time benefit, i.e. how much the call becomes faster.
     ??? perhaps computing how much the caller+callee together become faster
     would lead to more realistic results.  */
  if (!uninlined_call_time)
    uninlined_call_time = 1;
  relbenefit =
    RDIV (((gcov_type)uninlined_call_time - inlined_call_time) * RELATIVE_TIME_BENEFIT_RANGE,
	  uninlined_call_time);
  relbenefit = MIN (relbenefit, RELATIVE_TIME_BENEFIT_RANGE);
  gcc_checking_assert (relbenefit >= 0);
  relbenefit = MAX (relbenefit, 1);
  return relbenefit;
}
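
/* For instance, when inlining removes half of the estimated call time
   (inlined_call_time == uninlined_call_time / 2), the result is
   RELATIVE_TIME_BENEFIT_RANGE / 2; a zero benefit is clamped to 1 so the
   badness computed from it stays nonzero.  */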
/* A cost model driving the inlining heuristics so that edges with the
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of the nodes affected are recomputed so the
   metrics may accurately depend on values such as the number of inlinable
   callers of the function or the function body size.  */
static int
edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth, edge_time;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee,
							      NULL);
  struct inline_summary *callee_info = inline_summary (callee);
  inline_hints hints;

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    return INT_MIN;

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  gcc_checking_assert (edge_time <= callee_info->time);
  gcc_checking_assert (growth <= callee_info->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s/%i -> %s/%i\n",
	       xstrdup (cgraph_node_name (edge->caller)),
	       edge->caller->symbol.order,
	       xstrdup (cgraph_node_name (callee)),
	       edge->callee->symbol.order);
      fprintf (dump_file, "      size growth %i, time %i ",
	       growth,
	       edge_time);
      dump_inline_hints (dump_file, hints);
      if (big_speedup_p (edge))
	fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN / 2 + growth;
      if (dump)
	fprintf (dump_file, "      %i: Growth %i <= 0\n", (int) badness,
		 growth);
    }

  /* When profiling is available, compute badness as:

		relative_edge_count * relative_time_benefit
     goodness = -------------------------------------------
			growth_of_caller
     badness = - goodness

     The fraction is upside down, because on edge counts and time benefits
     the bounds are known.  Edge growth is essentially unlimited.  */

  else if (max_count)
    {
      int relbenefit = relative_time_benefit (callee_info, edge, edge_time);
      badness =
	((int)
	 ((double) edge->count * INT_MIN / 2 / max_count / RELATIVE_TIME_BENEFIT_RANGE) *
	 relbenefit) / growth;

      /* Be sure that insanity of the profile won't lead to increasing counts
	 in the scaling and thus to overflow in the computation above.  */
      gcc_assert (max_count >= edge->count);
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i (relative %f): profile info. Relative count %f"
		   " * Relative benefit %f\n",
		   (int) badness, (double) badness / INT_MIN,
		   (double) edge->count / max_count,
		   relbenefit * 100.0 / RELATIVE_TIME_BENEFIT_RANGE);
	}
    }
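
  /* As an illustration of the formula above: an edge with half of the
     maximal count and a maximal time benefit gets a badness of about
     INT_MIN / 4 divided by its growth, so hotter and more beneficial edges
     sort closer to INT_MIN and are popped from the heap first.  */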
  /* When a function local profile is available, compute badness as:

		    relative_time_benefit
     goodness = ---------------------------------
		growth_of_caller * overall_growth

     badness = - goodness, further compensated by the inline hints.  */
  else if (flag_guess_branch_prob)
    {
      badness = (relative_time_benefit (callee_info, edge, edge_time)
		 * (INT_MIN / 16 / RELATIVE_TIME_BENEFIT_RANGE));
      badness /= (MIN (65536/2, growth) * MIN (65536/2, MAX (1, callee_info->growth)));
      gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 16);
      if ((hints & (INLINE_HINT_indirect_call
		    | INLINE_HINT_loop_iterations
		    | INLINE_HINT_array_index
		    | INLINE_HINT_loop_stride))
	  || callee_info->growth <= 0)
	badness *= 8;
      if (hints & (INLINE_HINT_same_scc))
	badness /= 16;
      else if (hints & (INLINE_HINT_in_scc))
	badness /= 8;
      else if (hints & (INLINE_HINT_cross_module))
	badness /= 2;
      gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 2);
      if ((hints & INLINE_HINT_declared_inline) && badness >= INT_MIN / 32)
	badness *= 16;
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i: guessed profile. frequency %f,"
		   " benefit %f%%, time w/o inlining %i, time w inlining %i"
		   " overall growth %i (current) %i (original)\n",
		   (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE,
		   relative_time_benefit (callee_info, edge, edge_time) * 100.0
		   / RELATIVE_TIME_BENEFIT_RANGE,
		   (int)compute_uninlined_call_time (callee_info, edge),
		   (int)compute_inlined_call_time (edge, edge_time),
		   estimate_growth (callee),
		   callee_info->growth);
	}
    }
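
  /* Roughly, in the branch above: a maximal-benefit call whose inlining
     grows the caller by 16 insns and the overall unit by 4 ends up near
     INT_MIN / 16 / (16 * 4); the hint multiplications and divisions then
     shift the result by small powers of two.  */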
  /* When a function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall number
     of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = growth * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness >>= nest;
      else
	badness <<= nest;
      if (dump)
	fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
		 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
    return badness + 1;
  else
    return badness;
}
/* Recompute badness of EDGE and update its key in HEAP if needed.  */

static void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key only decreases the keys.
	 When we increase the key we do not update the heap
	 and instead re-insert the element once it becomes
	 a minimum of the heap.  */
      if (badness < n->key)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "  decreasing badness %s/%i -> %s/%i, %i to %i\n",
		       xstrdup (cgraph_node_name (edge->caller)),
		       edge->caller->symbol.order,
		       xstrdup (cgraph_node_name (edge->callee)),
		       edge->callee->symbol.order,
		       (int)n->key,
		       badness);
	    }
	  fibheap_replace_key (heap, n, badness);
	  gcc_checking_assert (n->key == badness);
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "  enqueuing call %s/%i -> %s/%i, badness %i\n",
		   xstrdup (cgraph_node_name (edge->caller)),
		   edge->caller->symbol.order,
		   xstrdup (cgraph_node_name (edge->callee)),
		   edge->callee->symbol.order,
		   badness);
	}
      edge->aux = fibheap_insert (heap, badness, edge);
    }
}
/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need a reset
   because better context may be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  int i;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  /* WHERE body size has changed, the cached growth is invalid.  */
  reset_node_growth_cache (where);

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);
  for (i = 0; ipa_ref_list_referring_iterate (&where->symbol.ref_list,
					      i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      reset_edge_caches (ipa_ref_referring_node (ref));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (e->inline_failed)
	  reset_edge_growth_cache (e);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
   it is inlinable.  Otherwise check all edges.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes,
		    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  int i;
  struct ipa_ref *ref;

  if ((!node->symbol.alias && !inline_summary (node)->inlinable)
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;

  for (i = 0; ipa_ref_list_referring_iterate (&node->symbol.ref_list,
					      i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      {
	struct cgraph_node *alias = ipa_ref_referring_node (ref);
	update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
      }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	if (!check_inlinablity_for
	    || check_inlinablity_for == edge)
	  {
	    if (can_inline_edge_p (edge, false)
		&& want_inline_small_function_p (edge, false))
	      update_edge_key (heap, edge);
	    else if (edge->aux)
	      {
		report_inline_failed_reason (edge);
		fibheap_delete_node (heap, (fibnode_t) edge->aux);
		edge->aux = NULL;
	      }
	  }
	else if (edge->aux)
	  update_edge_key (heap, edge);
      }
}
/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	enum availability avail;
	struct cgraph_node *callee;
	/* We do not reset the callee growth cache here.  Since we added a new
	   call, growth should have just increased and consequently the badness
	   metric doesn't need updating.  */
	if (e->inline_failed
	    && (callee = cgraph_function_or_thunk_node (e->callee, &avail))
	    && inline_summary (callee)->inlinable
	    && avail >= AVAIL_AVAILABLE
	    && !bitmap_bit_p (updated_nodes, callee->uid))
	  {
	    if (can_inline_edge_p (e, false)
		&& want_inline_small_function_p (e, false))
	      update_edge_key (heap, e);
	    else if (e->aux)
	      {
		report_inline_failed_reason (e);
		fibheap_delete_node (heap, (fibnode_t) e->aux);
		e->aux = NULL;
	      }
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			fibheap_t heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
	|| (cgraph_function_or_thunk_node (e->callee, &avail) == node
	    && avail > AVAIL_OVERWRITABLE))
      {
	/* When profile feedback is available, prioritize by expected number
	   of calls.  */
	fibheap_insert (heap,
			!max_count ? -e->frequency
			: -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
			e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
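
/* The division above scales 64-bit profile counts into the int key space of
   the fibheap: dividing by ceil (max_count / 2^24) maps counts into roughly
   0 ... 2^24, negated so that bigger counts come out first.  */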
/* Decide on recursive inlining: in the case the function has recursive calls,
   inline until body size reaches the given argument.  If any new indirect
   edges are discovered in the process, add them to *NEW_EDGES, unless
   NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
		    vec<cgraph_edge_p> *new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  fibheap_t heap;
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->symbol.decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     cgraph_node_name (node));

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap))
    {
      struct cgraph_edge *curr
	= (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode, *dest = curr->callee;

      if (!can_inline_edge_p (curr, true))
	continue;

      /* MASTER_CLONE is produced in the case we already started modifying
	 the function.  Be sure to redirect the edge to the original body
	 before estimating growths, otherwise we will be seeing growths after
	 inlining the already modified body.  */
      if (master_clone)
	{
	  cgraph_redirect_edge_callee (curr, master_clone);
	  reset_edge_growth_cache (curr);
	}

      if (estimate_size_after_inlining (node, curr) > limit)
	{
	  cgraph_redirect_edge_callee (curr, dest);
	  reset_edge_growth_cache (curr);
	  break;
	}

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->symbol.decl
	    == cgraph_function_or_thunk_node (curr->callee, NULL)->symbol.decl)
	  depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
	{
	  cgraph_redirect_edge_callee (curr, dest);
	  reset_edge_growth_cache (curr);
	  continue;
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count)
	    fprintf (dump_file, " called approx. %.2f times per call",
		     (double)curr->count / node->count);
	  fprintf (dump_file, "\n");
	}
      if (!master_clone)
	{
	  /* We need the original clone to copy around.  */
	  master_clone = cgraph_clone_node (node, node->symbol.decl,
					    node->count, CGRAPH_FREQ_BASE,
					    false, vNULL, true, NULL);
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL);
	  cgraph_redirect_edge_callee (curr, master_clone);
	  reset_edge_growth_cache (curr);
	}

      inline_call (curr, false, new_edges, &overall_size, true);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }

  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");
  fibheap_delete (heap);

  if (!master_clone)
    return false;

  if (dump_file)
    fprintf (dump_file,
	     "\n   Inlined %i times, "
	     "body grown from size %i to %i, time %i to %i\n", n,
	     inline_summary (master_clone)->size, inline_summary (node)->size,
	     inline_summary (master_clone)->time, inline_summary (node)->time);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
  for (node = cgraph_first_function (); node != master_clone;
       node = next)
    {
      next = cgraph_next_function (node);
      if (node->global.inlined_to == master_clone)
	cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  return true;
}
/* Given a whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
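
/* E.g. if --param inline-unit-growth is 30, a unit of 100000 insns may grow
   to 130000; smaller units are first rounded up to PARAM_LARGE_UNIT_INSNS,
   so tiny programs get proportionally more headroom.  */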
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (fibheap_t heap, vec<cgraph_edge_p> new_edges)
{
  while (new_edges.length () > 0)
    {
      struct cgraph_edge *edge = new_edges.pop ();

      gcc_assert (!edge->aux);
      if (edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true))
	edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
    }
}
/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap ordered in
   increasing badness.

   The inlining of small functions is bounded by unit growth parameters.  */
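
/* In outline, the main loop below:
   1) pops the minimum-badness edge from the fibheap;
   2) recomputes its badness and re-inserts the edge if the cached key went
      stale (only decreases are propagated eagerly, increases are lazy);
   3) gives up on the edge when unit growth would exceed compute_max_insns;
   4) hands self-recursive edges over to recursive_inlining;
   5) otherwise inlines the call and updates the keys of affected callers
      and callees.  */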
static void
inline_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  fibheap_t edge_heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  vec<cgraph_edge_p> new_indirect_edges = vNULL;
  int initial_size = 0;
  struct cgraph_node **order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);

  if (flag_indirect_inlining)
    new_indirect_edges.create (8);

  /* Compute overall unit size and other global parameters used by badness
     metrics.  */

  max_count = 0;
  ipa_reduced_postorder (order, true, true, NULL);
  free (order);

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (cgraph_function_with_gimple_body_p (node)
	    || node->thunk.thunk_p)
	  {
	    struct inline_summary *info = inline_summary (node);
	    struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->symbol.aux;

	    if (!DECL_EXTERNAL (node->symbol.decl))
	      initial_size += info->size;
	    info->growth = estimate_growth (node);
	    if (dfs && dfs->next_cycle)
	      {
		struct cgraph_node *n2;
		int id = dfs->scc_no + 1;
		for (n2 = node; n2;
		     n2 = ((struct ipa_dfs_info *) n2->symbol.aux)->next_cycle)
		  {
		    struct inline_summary *info2 = inline_summary (n2);
		    if (info2->scc_no)
		      break;
		    info2->scc_no = id;
		  }
	      }
	  }

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (max_count < edge->count)
	    max_count = edge->count;
      }
  ipa_free_postorder_info ();
  initialize_growth_caches ();

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining of small functions.  Starting with size %i.\n",
	     initial_size);

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  /* Populate the heap with all edges we might inline.  */

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (dump_file)
	  fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
		   cgraph_node_name (node), node->symbol.order);

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (edge->inline_failed
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true)
	      && edge->inline_failed)
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (edge_heap, edge);
	    }
      }

  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));

  while (!fibheap_empty (edge_heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (edge_heap);
      int current_badness;
      int cached_badness;
      int growth;

      edge = (struct cgraph_edge *) fibheap_extract_min (edge_heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;

      /* Be sure that caches are maintained consistent.
	 We cannot make this ENABLE_CHECKING only because it causes different
	 updates of the fibheap queue.  */
      cached_badness = edge_badness (edge, false);
      reset_edge_growth_cache (edge);
      reset_node_growth_cache (edge->callee);

      /* When updating the edge costs, we only decrease badness in the keys.
	 Increases of badness are handled lazily; when we see a key with an
	 out of date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      gcc_assert (cached_badness == current_badness);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
	{
	  edge->aux = fibheap_insert (edge_heap, current_badness, edge);
	  continue;
	}

      if (!can_inline_edge_p (edge, true))
	continue;

      callee = cgraph_function_or_thunk_node (edge->callee, NULL);
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s/%i with %i size\n",
		   cgraph_node_name (callee), callee->symbol.order,
		   inline_summary (callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s/%i in %s:%i\n"
		   " Estimated growth after inlined into all is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller), edge->caller->symbol.order,
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1
		   : gimple_lineno ((const_gimple) edge->call_stmt),
		   estimate_growth (callee),
		   badness,
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file, " Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}

      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  continue;
	}

      if (!want_inline_small_function_p (edge, true))
	continue;

      /* Heuristics for inlining small functions work poorly for
	 recursive calls where we do an effect similar to loop unrolling.
	 When inlining such an edge seems profitable, leave the decision to
	 the specific recursive inliner.  */
      if (cgraph_edge_recursive_p (edge))
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   flag_indirect_inlining
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      continue;
	    }
	  reset_edge_caches (where);
	  /* The recursive inliner inlines all recursive calls of the function
	     at once.  Consequently we need to update all callee keys.  */
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (edge_heap, new_indirect_edges);
	  update_callee_keys (edge_heap, where, updated_nodes);
	}
      else
	{
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where self recursive function A is inlined
	     into B.  This is a desired optimization in some cases, since it
	     leads to an effect similar to loop peeling and we might
	     completely optimize out the recursive call.  However we must be
	     extra selective.  */

	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->symbol.decl == callee->symbol.decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	    }
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
						     true, depth))
	    {
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->symbol.decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      continue;
	    }
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);

	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size, true);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (edge_heap, new_indirect_edges);

	  reset_edge_caches (edge->callee);
	  reset_node_growth_cache (callee);

	  update_callee_keys (edge_heap, where, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as the number of inlinable calls and the size of the function
	 body.  After inlining these properties might change for the function
	 we inlined into (since its body size changed) and for the functions
	 called by the function we inlined (since the number of their
	 inlinable callers might change).  */
      update_caller_keys (edge_heap, where, updated_nodes, NULL);
      bitmap_clear (updated_nodes);

      if (dump_file)
	{
	  fprintf (dump_file,
		   " Inlined into %s which now has time %i and size %i, "
		   "net change of %+i.\n",
		   cgraph_node_name (edge->caller),
		   inline_summary (edge->caller)->time,
		   inline_summary (edge->caller)->size,
		   overall_size - old_size);
	}
      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }

  free_growth_caches ();
  new_indirect_edges.release ();
  fibheap_delete (edge_heap);
  if (dump_file)
    fprintf (dump_file,
	     "Unit growth for small function inlining: %i->%i (%i%%)\n",
	     initial_size, overall_size,
	     initial_size ? overall_size * 100 / (initial_size) - 100 : 0);
  BITMAP_FREE (updated_nodes);
}
/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

static void
flatten_function (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->symbol.aux == NULL);

  node->symbol.aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

      /* We've hit a cycle?  It is time to give up.  */
      if (callee->symbol.aux)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining %s into %s to avoid cycle.\n",
		     xstrdup (cgraph_node_name (callee)),
		     xstrdup (cgraph_node_name (e->caller)));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  flatten_function (callee, early);
	  continue;
	}

      /* The flatten attribute needs to be processed during late inlining.
	 For extra code quality we however do flattening during early
	 optimization, too.  */
      if (!early
	  ? !can_inline_edge_p (e, true)
	  : !can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: recursive call.\n");
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->symbol.decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->symbol.decl)))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup (cgraph_node_name (callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      orig_callee = callee;
      inline_call (e, true, NULL, NULL, false);
      if (e->callee != orig_callee)
	orig_callee->symbol.aux = (void *) node;
      flatten_function (e->callee, early);
      if (e->callee != orig_callee)
	orig_callee->symbol.aux = NULL;
    }

  node->symbol.aux = NULL;
  if (!node->global.inlined_to)
    inline_update_overall_summary (node);
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int i;
  int cold;

  if (in_lto_p && optimize)
    ipa_update_after_lto_read ();

  if (dump_file)
    dump_inline_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);

  FOR_EACH_FUNCTION (node)
    node->symbol.aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened.
	 Ideally when processing callees we stop inlining at the
	 entry of cycles, possibly cloning that entry point and
	 try to flatten itself turning it into a self-recursive
	 function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->symbol.decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", cgraph_node_name (node));
	  flatten_function (node, false);
	}
    }

  inline_small_functions ();

  /* Do the first after-inlining removal.  We want to remove all "stale"
     extern inline functions and virtual functions so we really know what
     is called once.  */
  symtab_remove_unreachable_nodes (false, dump_file);
  free (order);

  /* Inline functions with a property that after inlining into all callers the
     code size will shrink because the out-of-line copy is eliminated.
     We do this regardless of the callee size as long as function growth
     limits are met.  */
  if (flag_inline_functions_called_once)
    {
      if (dump_file)
	fprintf (dump_file,
		 "\nDeciding on functions to be inlined into all callers:\n");

      /* Inlining one function called once has a good chance of preventing
	 inlining another function into the same callee.  Ideally we should
	 work in priority order, but probably inlining hot functions first
	 is a good cut without the extra pain of maintaining the queue.

	 ??? this is not really fitting the bill perfectly: inlining a
	 function into a callee often leads to better optimization of the
	 callee due to increased context for optimization.
	 For example if the main() function calls a function that outputs
	 help and then a function that does the main optimization, we should
	 inline the second with priority even if both calls are cold by
	 themselves.

	 We probably want to implement a new predicate replacing our use of
	 maybe_hot_edge interpreted as maybe_hot_edge || callee is known
	 to be hot by some other means.  */
      for (cold = 0; cold <= 1; cold++)
	{
	  FOR_EACH_DEFINED_FUNCTION (node)
	    {
	      if (want_inline_function_to_all_callers_p (node, cold))
		{
		  int num_calls = 0;
		  struct cgraph_edge *e;
		  for (e = node->callers; e; e = e->next_caller)
		    num_calls++;
		  while (node->callers && !node->global.inlined_to)
		    {
		      struct cgraph_node *caller = node->callers->caller;

		      if (dump_file)
			{
			  fprintf (dump_file,
				   "\nInlining %s size %i.\n",
				   cgraph_node_name (node),
				   inline_summary (node)->size);
			  fprintf (dump_file,
				   " Called once from %s %i insns.\n",
				   cgraph_node_name (node->callers->caller),
				   inline_summary (node->callers->caller)->size);
			}

		      inline_call (node->callers, true, NULL, NULL, true);
		      if (dump_file)
			fprintf (dump_file,
				 " Inlined into %s which now has %i size\n",
				 cgraph_node_name (caller),
				 inline_summary (caller)->size);
		      if (!num_calls--)
			{
			  if (dump_file)
			    fprintf (dump_file, "New calls found; giving up.\n");
			  break;
			}
		    }
		}
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (optimize)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
	     "\nInlined %i calls, eliminated %i functions\n\n",
	     ncalls_inlined, nfunctions_inlined);

  if (dump_file)
    dump_inline_summaries (dump_file);
  /* In WPA we use inline summaries for the partitioning process.  */
  if (!flag_wpa)
    inline_free_summary ();
  return 0;
}
/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
      if (!DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining recursive call to %s.\n",
		     cgraph_node_name (e->callee));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      if (!can_early_inline_edge_p (e))
	{
	  /* Set inlined to true if the callee is marked "always_inline" but
	     is not inlinable.  This will allow flagging an error later in
	     expand_call_inline in tree-inline.c.  */
	  if (lookup_attribute ("always_inline",
				DECL_ATTRIBUTES (callee->symbol.decl)) != NULL)
	    inlined = true;
	  continue;
	}

      if (dump_file)
	fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
		 xstrdup (cgraph_node_name (e->callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }
  if (inlined)
    inline_update_overall_summary (node);

  return inlined;
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
      if (!inline_summary (callee)->inlinable
	  || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	  && !flag_inline_small_functions
	  && !flag_inline_functions)
	continue;

      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 cgraph_node_name (callee));

      if (!can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup (cgraph_node_name (callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      inline_call (e, true, NULL, NULL, true);
      inlined = true;
    }

  return inlined;
}
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function calls.  */

static unsigned int
early_inliner (void)
{
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if the data structures for the ipa-inliner are already
     computed.  This happens when some pass decides to construct a new
     function and cgraph_add_new_function calls lowering passes and early
     optimization on it.  This may confuse us when the early inliner decides
     to inline a call to a function clone, because function clones don't have
     parameter lists in ipa-prop matching their signature.  */
  if (ipa_node_params_vector.exists ())
    return 0;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  /* Even when not optimizing or not inlining inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks as functions calling
	 always inline functions will get less optimized, but at the
	 same time inlining of functions calling always inline
	 functions into an always inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of inlining.  */
      || DECL_DISREGARD_INLINE_LIMITS (node->symbol.decl))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->symbol.decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_file)
	fprintf (dump_file,
		 "Flattening %s\n", cgraph_node_name (node));
      flatten_function (node, true);
      inlined = true;
    }
  else
    {
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of the early inliner works as expected.  We however
	     have values approximately right and thus we only need to update
	     edge info that might be cleared out for newly discovered edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      struct inline_edge_summary *es = inline_edge_summary (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	      if (edge->callee->symbol.decl
		  && !gimple_check_call_matching_types (
		      edge->call_stmt, edge->callee->symbol.decl, false))
		edge->call_stmt_cannot_inline_p = true;
	    }
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}
struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  OPTGROUP_INLINE,			/* optinfo_flags */
  NULL,					/* gate */
  early_inliner,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_EARLY_INLINING,			/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.

   Enable inlining unconditionally at -flto.  We need size estimates to
   drive partitioning.  */

static bool
gate_ipa_inline (void)
{
  return optimize || flag_lto || flag_wpa;
}
struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",				/* name */
  OPTGROUP_INLINE,			/* optinfo_flags */
  gate_ipa_inline,			/* gate */
  ipa_inline,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_IPA_INLINING,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_symtab
  | TODO_remove_functions		/* todo_flags_finish */
 },
 inline_generate_summary,		/* generate_summary */
 inline_write_summary,			/* write_summary */
 inline_read_summary,			/* read_summary */
 NULL,					/* write_optimization_summary */
 NULL,					/* read_optimization_summary */
 NULL,					/* stmt_fixup */
 0,					/* TODOs */
 inline_transform,			/* function_transform */
 NULL					/* variable_transform */
};