/* Inlining decision heuristics.
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/*  Inlining decision heuristics

    The implementation of the inliner is organized as follows:

    inlining heuristics limits

      can_inline_edge_p allows checking that a particular inlining is allowed
      by the limits specified by the user (allowed function growth, growth and
      so on).

      Functions are inlined when it is obvious the result is profitable (such
      as functions called once or when inlining reduces code size).
      In addition to that we perform inlining of small functions and recursive
      inlining.

    inlining heuristics

      The inliner itself is split into two passes:

      pass_early_inlining

	Simple local inlining pass inlining callees into the current function.
	This pass makes no use of whole unit analysis and thus it can do only
	very simple decisions based on local properties.

	The strength of the pass is that it is run in topological order
	(reverse postorder) on the callgraph. Functions are converted into SSA
	form just before this pass and optimized subsequently. As a result, the
	callees of the function seen by the early inliner were already optimized
	and the results of early inlining add a lot of optimization opportunities
	for the local optimization.

	The pass handles the obvious inlining decisions within the compilation
	unit - inlining auto inline functions, inlining for size and
	flattening.

	The main strength of the pass is the ability to eliminate abstraction
	penalty in C++ code (via a combination of inlining and early
	optimization) and thus improve the quality of analysis done by real IPA
	optimizers.

	Because of lack of whole unit knowledge, the pass cannot really make
	good code size/performance tradeoffs.  It however does very simple
	speculative inlining allowing code size to grow by
	EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
	the optimizations performed later are very likely to eliminate the cost.

      pass_ipa_inline

	This is the real inliner able to handle inlining with whole program
	knowledge. It performs the following steps:

	1) inlining of small functions.  This is implemented by a greedy
	algorithm ordering all inlinable cgraph edges by their badness and
	inlining them in this order as long as inline limits allow doing so.

	This heuristic is not very good at inlining recursive calls.  Recursive
	calls can be inlined with results similar to loop unrolling. To do so,
	a special purpose recursive inliner is executed on the function when a
	recursive edge is met as a viable candidate.

	2) Unreachable functions are removed from the callgraph.  Inlining leads
	to devirtualization and other modification of the callgraph so functions
	may become unreachable during the process. Also functions declared as
	extern inline or virtual functions are removed, since after inlining
	we no longer need the offline bodies.

	3) Functions called once and not exported from the unit are inlined.
	This should almost always lead to reduction of code size by eliminating
	the need for an offline copy of the function.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "trans-mem.h"
#include "calls.h"
#include "tree-inline.h"
#include "params.h"
#include "profile.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "auto-profile.h"
#include "builtins.h"
#include "fibonacci_heap.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
/* Statistics we collect about inlining algorithm.  */
static int overall_size;
static profile_count max_count;
static profile_count spec_rem;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee->ultimate_alias_target ();
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  ipa_fn_summary *info, *what_info;
  ipa_fn_summary *outer_info = ipa_fn_summaries->get (to);

  /* Look for function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      info = ipa_fn_summaries->get (to);
      if (limit < info->self_size)
	limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
	stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
	to = to->callers->caller;
      else
	break;
    }

  what_info = ipa_fn_summaries->get (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
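/* Worked example of the growth limit above, with hypothetical values:
   if the largest body on the inline path has self_size 1000 and
   --param large-function-growth is, say, 100 (percent), then
       limit = 1000 + 1000 * 100 / 100 = 2000,
   so the combined body is rejected only when it both exceeds 2000 insns
   and is above --param large-function-insns.  */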
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
		       "  not inlinable: %C -> %C, %s\n",
		       e->caller, e->callee,
		       cgraph_inline_failed_string (e->inline_failed));
      if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
	   || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
	  && e->caller->lto_file_data
	  && e->callee->ultimate_alias_target ()->lto_file_data)
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			   "  LTO objects: %s, %s\n",
			   e->caller->lto_file_data->file_name,
			   e->callee->ultimate_alias_target ()
			     ->lto_file_data->file_name);
	}
      if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
	cl_target_option_print_diff
	  (dump_file, 2, target_opts_for_fn (e->caller->decl),
	   target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
      if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
	cl_optimization_print_diff
	  (dump_file, 2, opts_for_fn (e->caller->decl),
	   opts_for_fn (e->callee->ultimate_alias_target ()->decl));
    }
}
/* Decide whether sanitizer-related attributes allow inlining.  */

static bool
sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
{
  if (!caller || !callee)
    return true;

  /* Allow inlining always_inline functions into no_sanitize_address
     functions.  */
  if (!sanitize_flags_p (SANITIZE_ADDRESS, caller)
      && lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
    return true;

  return ((sanitize_flags_p (SANITIZE_ADDRESS, caller)
	   == sanitize_flags_p (SANITIZE_ADDRESS, callee))
	  && (sanitize_flags_p (SANITIZE_POINTER_COMPARE, caller)
	      == sanitize_flags_p (SANITIZE_POINTER_COMPARE, callee))
	  && (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, caller)
	      == sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, callee)));
}
/* Used for flags where it is safe to inline when caller's value is
   greater than callee's.  */
#define check_maybe_up(flag) \
   (opts_for_fn (caller->decl)->x_##flag		\
    != opts_for_fn (callee->decl)->x_##flag		\
    && (!always_inline					\
	|| opts_for_fn (caller->decl)->x_##flag		\
	   < opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where it is safe to inline when caller's value is
   smaller than callee's.  */
#define check_maybe_down(flag) \
   (opts_for_fn (caller->decl)->x_##flag		\
    != opts_for_fn (callee->decl)->x_##flag		\
    && (!always_inline					\
	|| opts_for_fn (caller->decl)->x_##flag		\
	   > opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where exact match is needed for correctness.  */
#define check_match(flag) \
   (opts_for_fn (caller->decl)->x_##flag		\
    != opts_for_fn (callee->decl)->x_##flag)
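/* Illustrative expansion: check_maybe_up (flag_trapping_math) evaluates to

     opts_for_fn (caller->decl)->x_flag_trapping_math
       != opts_for_fn (callee->decl)->x_flag_trapping_math
     && (!always_inline
	 || opts_for_fn (caller->decl)->x_flag_trapping_math
	    < opts_for_fn (callee->decl)->x_flag_trapping_math)

   i.e. an option mismatch normally blocks inlining, but for always_inline
   callees it only blocks when the caller's value is smaller (less strict)
   than the callee's.  */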
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   if REPORT is true, output reason to the dump file.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report,
		   bool early = false)
{
  gcc_checking_assert (e->inline_failed);

  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    {
      if (report)
	report_inline_failed_reason (e);
      return false;
    }

  bool inlinable = true;
  enum availability avail;
  cgraph_node *caller = e->caller->global.inlined_to
			? e->caller->global.inlined_to : e->caller;
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);

  if (!callee->definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  if (!early && (!opt_for_fn (callee->decl, optimize)
		 || !opt_for_fn (caller->decl, optimize)))
    {
      e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
      inlinable = false;
    }
  else if (callee->calls_comdat_local)
    {
      e->inline_failed = CIF_USES_COMDAT_LOCAL;
      inlinable = false;
    }
  else if (avail <= AVAIL_INTERPOSABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  /* All edges with call_stmt_cannot_inline_p should have inline_failed
     initialized to one of FINAL_ERROR reasons.  */
  else if (e->call_stmt_cannot_inline_p)
    gcc_unreachable ();
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (caller->decl)
	   && DECL_FUNCTION_PERSONALITY (callee->decl)
	   && (DECL_FUNCTION_PERSONALITY (caller->decl)
	       != DECL_FUNCTION_PERSONALITY (callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (caller->decl,
						callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  else if (ipa_fn_summaries->get (callee) == NULL
	   || !ipa_fn_summaries->get (callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  /* Don't inline a function with mismatched sanitization attributes.  */
  else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
    {
      e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
      inlinable = false;
    }
  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}
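/* Note on the split between the two predicates: can_inline_edge_p above
   checks hard correctness constraints (body availability, EH personality,
   target option compatibility, ...), while can_inline_edge_by_limits_p
   below checks growth limits and optimization-option mismatches.  Callers
   that need a full answer test both, typically as
     can_inline_edge_p (e, true) && can_inline_edge_by_limits_p (e, true).  */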
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   if REPORT is true, output reason to the dump file.

   if DISREGARD_LIMITS is true, ignore size limits.  */

static bool
can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
			     bool disregard_limits = false, bool early = false)
{
  gcc_checking_assert (e->inline_failed);

  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    {
      if (report)
	report_inline_failed_reason (e);
      return false;
    }

  bool inlinable = true;
  enum availability avail;
  cgraph_node *caller = e->caller->global.inlined_to
			? e->caller->global.inlined_to : e->caller;
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
  /* Check if caller growth allows the inlining.  */
  if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
      && !disregard_limits
      && !lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (caller->decl))
      && !caller_growth_limits (e))
    inlinable = false;
  else if (callee->externally_visible
	   && !DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	   && flag_live_patching == LIVE_PATCHING_INLINE_ONLY_STATIC)
    {
      e->inline_failed = CIF_EXTERN_LIVE_ONLY_STATIC;
      inlinable = false;
    }
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      bool always_inline =
	     (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	      && lookup_attribute ("always_inline",
				   DECL_ATTRIBUTES (callee->decl)));
      ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
      ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);

      /* Until GCC 4.9 we did not check the semantics-altering flags
	 below and inlined across optimization boundaries.
	 Enabling checks below breaks several packages by refusing
	 to inline library always_inline functions.  See PR65873.
	 Disable the check for early inlining for now until better solution
	 is found.  */
      if (always_inline && early)
	;
      /* There are some options that change IL semantics which means
	 we cannot inline in these cases for correctness reasons.
	 Not even for always_inline declared functions.  */
      else if (check_match (flag_wrapv)
	       || check_match (flag_trapv)
	       || check_match (flag_pcc_struct_return)
	       /* When caller or callee does FP math, be sure FP codegen flags
		  are compatible.  */
	       || ((caller_info->fp_expressions && callee_info->fp_expressions)
		   && (check_maybe_up (flag_rounding_math)
		       || check_maybe_up (flag_trapping_math)
		       || check_maybe_down (flag_unsafe_math_optimizations)
		       || check_maybe_down (flag_finite_math_only)
		       || check_maybe_up (flag_signaling_nans)
		       || check_maybe_down (flag_cx_limited_range)
		       || check_maybe_up (flag_signed_zeros)
		       || check_maybe_down (flag_associative_math)
		       || check_maybe_down (flag_reciprocal_math)
		       || check_maybe_down (flag_fp_int_builtin_inexact)
		       /* Strictly speaking only when the callee contains
			  function calls that may end up setting errno.  */
		       || check_maybe_up (flag_errno_math)))
	       /* We do not want to make code compiled with exceptions to be
		  brought into a non-EH function unless we know that the callee
		  does not throw.
		  This is tracked by DECL_FUNCTION_PERSONALITY.  */
	       || (check_maybe_up (flag_non_call_exceptions)
		   && DECL_FUNCTION_PERSONALITY (callee->decl))
	       || (check_maybe_up (flag_exceptions)
		   && DECL_FUNCTION_PERSONALITY (callee->decl))
	       /* When devirtualization is disabled for callee, it is not safe
		  to inline it as we possibly mangled the type info.
		  Allow early inlining of always inlines.  */
	       || (!early && check_maybe_down (flag_devirtualize)))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
      /* gcc.dg/pr43564.c.  Apply user-forced inline even at -O0.  */
      else if (always_inline)
	;
      /* When user added an attribute to the callee honor it.  */
      else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
	       && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
      /* If explicit optimize attributes are not used, the mismatch is caused
	 by different command line options used to build different units.
	 Do not care about COMDAT functions - those are intended to be
	 optimized with the optimization flags of the module they are used in.
	 Also do not care about mixing up size/speed optimization when
	 DECL_DISREGARD_INLINE_LIMITS is set.  */
      else if ((callee->merged_comdat
		&& !lookup_attribute ("optimize",
				      DECL_ATTRIBUTES (caller->decl)))
	       || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	;
      /* If the mismatch is caused by merging two LTO units with different
	 optimization flags we want to be a bit nicer.  However never inline
	 if one of the functions is not optimized at all.  */
      else if (!opt_for_fn (callee->decl, optimize)
	       || !opt_for_fn (caller->decl, optimize))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
      /* If callee is optimized for size and caller is not, allow inlining if
	 code shrinks or we are in MAX_INLINE_INSNS_SINGLE limit and callee
	 is inline (and thus likely an unified comdat).  This will allow caller
	 to run faster.  */
      else if (opt_for_fn (callee->decl, optimize_size)
	       > opt_for_fn (caller->decl, optimize_size))
	{
	  int growth = estimate_edge_growth (e);
	  if (growth > PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE)
	      && (!DECL_DECLARED_INLINE_P (callee->decl)
		  && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
				    MAX_INLINE_INSNS_AUTO)))
	    {
	      e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	      inlinable = false;
	    }
	}
      /* If callee is more aggressively optimized for performance than caller,
	 we generally want to inline only cheap (runtime wise) functions.  */
      else if (opt_for_fn (callee->decl, optimize_size)
	       < opt_for_fn (caller->decl, optimize_size)
	       || (opt_for_fn (callee->decl, optimize)
		   > opt_for_fn (caller->decl, optimize)))
	{
	  if (estimate_edge_time (e)
	      >= 20 + ipa_call_summaries->get (e)->call_stmt_time)
	    {
	      e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	      inlinable = false;
	    }
	}
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}
/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  /* Early inliner might get called at WPA stage when IPA pass adds new
     function.  In this case we cannot really do any of early inlining
     because function bodies are missing.  */
  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    return false;
  if (!gimple_has_body_p (callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In early inliner some of callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			 "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true, true)
      || !can_inline_edge_by_limits_p (e, true, false, true))
    return false;
  return true;
}
/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      num++;
  return num;
}
/* Return true if we are interested in inlining small function.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  /* For AutoFDO, we need to make sure that before profile summary, all
     hot paths' IR look exactly the same as the profiled binary.  As a result,
     in einliner, we will disregard size limit and inline those callsites
     that are:
       * inlined in the profiled binary, and
       * the cloned callee has enough samples to be considered "hot".  */
  else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      int n;

      if (growth <= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE))
	;
      else if (!e->maybe_hot_p ())
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "call is cold and code would grow by %i\n",
			     e->caller, callee, growth);
	  want_inline = false;
	}
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "growth %i exceeds --param early-inlining-insns\n",
			     e->caller, callee, growth);
	  want_inline = false;
	}
      else if ((n = num_calls (callee)) != 0
	       && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "growth %i exceeds --param early-inlining-insns "
			     "divided by number of calls\n",
			     e->caller, callee, growth);
	  want_inline = false;
	}
    }
  return want_inline;
}
/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

inline sreal
compute_uninlined_call_time (struct cgraph_edge *edge,
			     sreal uninlined_call_time)
{
  cgraph_node *caller = (edge->caller->global.inlined_to
			 ? edge->caller->global.inlined_to
			 : edge->caller);

  sreal freq = edge->sreal_frequency ();
  if (freq > 0)
    uninlined_call_time *= freq;
  else
    uninlined_call_time = uninlined_call_time >> 11;

  sreal caller_time = ipa_fn_summaries->get (caller)->time;
  return uninlined_call_time + caller_time;
}

/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

inline sreal
compute_inlined_call_time (struct cgraph_edge *edge,
			   sreal time)
{
  cgraph_node *caller = (edge->caller->global.inlined_to
			 ? edge->caller->global.inlined_to
			 : edge->caller);
  sreal caller_time = ipa_fn_summaries->get (caller)->time;

  sreal freq = edge->sreal_frequency ();
  if (freq > 0)
    time *= freq;
  else
    time = time >> 11;

  /* This calculation should match one in ipa-inline-analysis.c
     (estimate_edge_size_and_time).  */
  time -= (sreal)ipa_call_summaries->get (edge)->call_stmt_time * freq;
  time += caller_time;
  if (time <= 0)
    time = ((sreal) 1) >> 8;
  gcc_checking_assert (time >= 0);
  return time;
}
/* Return true if the speedup for inlining E is bigger than
   PARAM_MAX_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  sreal unspec_time;
  sreal spec_time = estimate_edge_time (e, &unspec_time);
  sreal time = compute_uninlined_call_time (e, unspec_time);
  sreal inlined_time = compute_inlined_call_time (e, spec_time);

  if ((time - inlined_time) * 100
      > (sreal) (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)))
    return true;
  return false;
}
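/* Continuing the example above: time 120 vs inlined_time 114 gives
   (120 - 114) * 100 = 600, while with --param inline-min-speedup at,
   say, 15 the threshold is 120 * 15 = 1800, so this edge would not
   count as a big speedup.  */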
/* Return true if we are interested in inlining small function.
   When REPORT is true, report reason to dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  /* Allow this function to be called before can_inline_edge_p,
     since it's usually cheaper.  */
  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    want_inline = false;
  else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  /* Do fast and conservative check if the function can be a good
     inline candidate.  At the moment we allow inline hints to
     promote non-inline functions to inline and we increase
     MAX_INLINE_INSNS_SINGLE 16-fold for inline functions.  */
  else if ((!DECL_DECLARED_INLINE_P (callee->decl)
	    && (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
	   && ipa_fn_summaries->get (callee)->min_size
		- ipa_call_summaries->get (e)->call_stmt_size
	      > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
    {
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
      want_inline = false;
    }
  else if ((DECL_DECLARED_INLINE_P (callee->decl)
	    || e->count.ipa ().nonzero_p ())
	   && ipa_fn_summaries->get (callee)->min_size
		- ipa_call_summaries->get (e)->call_stmt_size
	      > 16 * MAX_INLINE_INSNS_SINGLE)
    {
      e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
			  ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
			  : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      ipa_hints hints = estimate_edge_hints (e);
      int big_speedup = -1; /* compute this lazily */

      if (growth <= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE))
	;
      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
	 hints suggest that inlining given function is very profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
	       && growth >= MAX_INLINE_INSNS_SINGLE
	       && (growth >= MAX_INLINE_INSNS_SINGLE * 16
		   || (!(hints & (INLINE_HINT_indirect_call
				  | INLINE_HINT_known_hot
				  | INLINE_HINT_loop_iterations
				  | INLINE_HINT_loop_stride))
		       && !(big_speedup = big_speedup_p (e)))))
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  want_inline = false;
	}
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && !opt_for_fn (e->caller->decl, flag_inline_functions)
	       && growth >= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SMALL))
	{
	  /* growth_likely_positive is expensive, always test it last.  */
	  if (growth >= MAX_INLINE_INSNS_SINGLE
	      || growth_likely_positive (callee, growth))
	    {
	      e->inline_failed = CIF_NOT_DECLARED_INLINED;
	      want_inline = false;
	    }
	}
      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
	 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
	 inlining given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && !(hints & INLINE_HINT_known_hot)
	       && growth >= ((hints & (INLINE_HINT_indirect_call
				       | INLINE_HINT_loop_iterations
				       | INLINE_HINT_loop_stride))
			     ? MAX (MAX_INLINE_INSNS_AUTO,
				    MAX_INLINE_INSNS_SINGLE)
			     : MAX_INLINE_INSNS_AUTO)
	       && !(big_speedup == -1 ? big_speedup_p (e) : big_speedup))
	{
	  /* growth_likely_positive is expensive, always test it last.  */
	  if (growth >= MAX_INLINE_INSNS_SINGLE
	      || growth_likely_positive (callee, growth))
	    {
	      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	      want_inline = false;
	    }
	}
      /* If call is cold, do not inline when function body would grow.  */
      else if (!e->maybe_hot_p ()
	       && (growth >= MAX_INLINE_INSNS_SINGLE
		   || growth_likely_positive (callee, growth)))
	{
	  if (e->count.ipa () == profile_count::zero ())
	    e->inline_failed = CIF_NEVER_CALL;
	  else
	    e->inline_failed = CIF_UNLIKELY_CALL;
	  want_inline = false;
	}
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}
/* EDGE is self recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just introduce new recursive calls to appear.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  sreal caller_freq = 1;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!edge->maybe_hot_p ())
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }
  else if (outer_node->global.inlined_to
	   && (caller_freq = outer_node->callers->sreal_frequency ()) == 0)
    {
      reason = "caller frequency is 0";
      want_inline = false;
    }

  if (!want_inline)
    ;
  /* Inlining of self recursive function into copy of itself within other
     function is transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make probability
     of actual call to the self recursive function very small.  Be sure that
     the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      sreal max_prob = (sreal)1 - ((sreal)1 / (sreal)max_depth);
      int i;
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob;
      if (edge->sreal_frequency () >= max_prob * caller_freq)
	{
	  reason = "frequency of recursive call is too large";
	  want_inline = false;
	}
    }
  /* Recursive inlining, i.e. equivalent of unrolling, is profitable if
     recursion depth is large.  We reduce function call overhead and increase
     chances that things fit in hardware return predictor.

     Recursive inlining might however increase cost of stack frame setup
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile feedback
     is tricky.  For now we disable recursive inlining when probability of self
     recursion is low.

     Recursive inlining of self recursive call within loop also results in
     large loop depths that generally optimize badly.  We may want to throttle
     down inlining in those cases.  In particular this seems to happen in one
     of libstdc++ rb tree methods.  */
  else
    {
      if (edge->sreal_frequency () * 100
	  <= caller_freq
	     * PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY))
	{
	  reason = "frequency of recursive call is too small";
	  want_inline = false;
	}
    }
  if (!want_inline && dump_enabled_p ())
    dump_printf_loc (MSG_MISSED_OPTIMIZATION, edge->call_stmt,
		     "   not inlining recursively: %s\n", reason);
  return want_inline;
}
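/* Numeric illustration of the peeling bound above: with max_depth 8,
   max_prob starts at 1 - 1/8 = 0.875 and is squared once per extra level
   of depth, so depth 2 requires a frequency below roughly 0.766 and
   depth 3 below roughly 0.586 of the caller frequency.  */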
/* Return true when NODE has uninlinable caller;
   set HAS_HOT_CALL if it has hot call.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_callers (struct cgraph_node *node, void *has_hot_call)
{
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once)
	  || !opt_for_fn (e->caller->decl, optimize))
	return true;
      if (!can_inline_edge_p (e, true))
	return true;
      if (e->recursive_p ())
	return true;
      if (!can_inline_edge_by_limits_p (e, true))
	return true;
      if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
	*(bool *)has_hot_call = true;
    }
  return false;
}
/* If NODE has a caller, return true.  */

static bool
has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  if (node->callers)
    return true;
  return false;
}
/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  bool has_hot_call = false;

  /* Aliases get inlined along with the function they alias.  */
  if (node->alias)
    return false;
  /* Already inlined?  */
  if (node->global.inlined_to)
    return false;
  /* Does it have callers?  */
  if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
    return false;
  /* Inlining into all callers would increase size?  */
  if (estimate_growth (node) > 0)
    return false;
  /* All inlines must be possible.  */
  if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
					 true))
    return false;
  if (!cold && !has_hot_call)
    return false;
  return true;
}
/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable callers
   of the function or function body size.  */

static sreal
edge_badness (struct cgraph_edge *edge, bool dump)
{
  sreal badness;
  int growth;
  sreal edge_time, unspec_edge_time;
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  class ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
  ipa_hints hints;
  cgraph_node *caller = (edge->caller->global.inlined_to
			 ? edge->caller->global.inlined_to
			 : edge->caller);

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge, &unspec_edge_time);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  /* Check that inlined time is better, but tolerate some roundoff issues.
     FIXME: When callee profile drops to 0 we account calls more.  This
     should be fixed by never doing that.  */
  gcc_checking_assert ((edge_time * 100
			- callee_info->time * 101).to_int () <= 0
			|| callee->count.ipa ().initialized_p ());
  gcc_checking_assert (growth <= callee_info->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
	       edge->caller->dump_name (),
	       edge->callee->dump_name ());
      fprintf (dump_file, "      size growth %i, time %f unspec %f ",
	       growth,
	       edge_time.to_double (),
	       unspec_edge_time.to_double ());
      ipa_dump_hints (dump_file, hints);
      if (big_speedup_p (edge))
	fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
      if (dump)
	fprintf (dump_file, "      %f: Growth %d <= 0\n", badness.to_double (),
		 growth);
    }
  /* Inlining into EXTERNAL functions is not going to change anything unless
     they are themselves inlined.  */
  else if (DECL_EXTERNAL (caller->decl))
    {
      if (dump)
	fprintf (dump_file, "      max: function is external\n");
      return sreal::max ();
    }
  /* When profile is available. Compute badness as:

                 time_saved * caller_count
     goodness =  -------------------------------------------------
	         growth_of_caller * overall_growth * combined_size

     badness = - goodness

     Again use negative value to make calls with profile appear hotter
     than calls without.  */
  else if (opt_for_fn (caller->decl, flag_guess_branch_prob)
	   || caller->count.ipa ().nonzero_p ())
    {
      sreal numerator, denominator;
      int overall_growth;
      sreal inlined_time = compute_inlined_call_time (edge, edge_time);

      numerator = (compute_uninlined_call_time (edge, unspec_edge_time)
		   - inlined_time);
      if (numerator <= 0)
	numerator = ((sreal) 1 >> 8);
      if (caller->count.ipa ().nonzero_p ())
	numerator *= caller->count.ipa ().to_gcov_type ();
      else if (caller->count.ipa ().initialized_p ())
	numerator = numerator >> 11;
      denominator = growth;

      overall_growth = callee_info->growth;

      /* Look for inliner wrappers of the form:

	 inline_caller ()
	   {
	     do_fast_job...
	     if (need_more_work)
	       noninline_callee ();
	   }
	 Without penalizing this case, we usually inline noninline_callee
	 into the inline_caller because overall_growth is small preventing
	 further inlining of inline_caller.

	 Penalize only callgraph edges to functions with small overall
	 growth ...  */
      if (growth > overall_growth
	  /* ... and having only one caller which is not inlined ... */
	  && callee_info->single_caller
	  && !edge->caller->global.inlined_to
	  /* ... and edges executed only conditionally ... */
	  && edge->sreal_frequency () < 1
	  /* ... consider case where callee is not inline but caller is ... */
	  && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
	       && DECL_DECLARED_INLINE_P (caller->decl))
	      /* ... or when early optimizers decided to split and edge
		 frequency still indicates splitting is a win ... */
	      || (callee->split_part && !caller->split_part
		  && edge->sreal_frequency () * 100
		     < PARAM_VALUE
			 (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY)
		  /* ... and do not overwrite user specified hints.  */
		  && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
		      || DECL_DECLARED_INLINE_P (caller->decl)))))
	{
	  ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
	  int caller_growth = caller_info->growth;

	  /* Only apply the penalty when caller looks like inline candidate,
	     and it is not called once.  */
	  if (!caller_info->single_caller && overall_growth < caller_growth
	      && caller_info->inlinable
	      && caller_info->size
		 < (DECL_DECLARED_INLINE_P (caller->decl)
		    ? MAX_INLINE_INSNS_SINGLE : MAX_INLINE_INSNS_AUTO))
	    {
	      if (dump)
		fprintf (dump_file,
			 "     Wrapper penalty. Increasing growth %i to %i\n",
			 overall_growth, caller_growth);
	      overall_growth = caller_growth;
	    }
	}
      if (overall_growth > 0)
	{
	  /* Strongly prefer functions with few callers that can be inlined
	     fully.  The square root here leads to smaller binaries on average.
	     Watch however for extreme cases and return to linear function
	     when growth is large.  */
	  if (overall_growth < 256)
	    overall_growth *= overall_growth;
	  else
	    overall_growth += 256 * 256 - 256;
	  denominator *= overall_growth;
	}
      denominator *= ipa_fn_summaries->get (caller)->self_size + growth;

      badness = - numerator / denominator;

      if (dump)
	{
	  fprintf (dump_file,
		   "      %f: guessed profile. frequency %f, count %" PRId64
		   " caller count %" PRId64
		   " time w/o inlining %f, time with inlining %f"
		   " overall growth %i (current) %i (original)"
		   " %i (compensated)\n",
		   badness.to_double (),
		   edge->sreal_frequency ().to_double (),
		   edge->count.ipa ().initialized_p ()
		     ? edge->count.ipa ().to_gcov_type () : -1,
		   caller->count.ipa ().initialized_p ()
		     ? caller->count.ipa ().to_gcov_type () : -1,
		   compute_uninlined_call_time (edge,
						unspec_edge_time).to_double (),
		   inlined_time.to_double (),
		   estimate_growth (callee),
		   callee_info->growth, overall_growth);
	}
    }
  /* When function local profile is not available or it does not give
     useful information (ie frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
  else
    {
      int nest = MIN (ipa_call_summaries->get (edge)->loop_depth, 8);
      badness = growth;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness = badness >> nest;
      else
	badness = badness << nest;
      if (dump)
	fprintf (dump_file, "      %f: no profile. nest %i\n",
		 badness.to_double (), nest);
    }
  gcc_checking_assert (badness != 0);

  if (edge->recursive_p ())
    badness = badness.shift (badness > 0 ? 4 : -4);
  if ((hints & (INLINE_HINT_indirect_call
		| INLINE_HINT_loop_iterations
		| INLINE_HINT_loop_stride))
      || callee_info->growth <= 0)
    badness = badness.shift (badness > 0 ? -2 : 2);
  if (hints & (INLINE_HINT_same_scc))
    badness = badness.shift (badness > 0 ? 3 : -3);
  else if (hints & (INLINE_HINT_in_scc))
    badness = badness.shift (badness > 0 ? 2 : -2);
  else if (hints & (INLINE_HINT_cross_module))
    badness = badness.shift (badness > 0 ? 1 : -1);
  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    badness = badness.shift (badness > 0 ? -4 : 4);
  else if ((hints & INLINE_HINT_declared_inline))
    badness = badness.shift (badness > 0 ? -3 : 3);
  if (dump)
    fprintf (dump_file, "      Adjusted by hints %f\n", badness.to_double ());
  return badness;
}
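/* Worked instance of the guessed-profile formula above, with hypothetical
   values: time saved 6, growth 10, overall growth 20 (below 256, so it is
   squared to 400), and caller self_size + growth of 110 give
       badness = -6 / (10 * 400 * 110) = -6 / 440000,
   before the hint-based shifts above adjust the result.  */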
/* Recompute badness of EDGE and update its key in HEAP if needed.  */

static void
update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
{
  sreal badness = edge_badness (edge, false);
  if (edge->aux)
    {
      edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
      gcc_checking_assert (n->get_data () == edge);

      /* fibonacci_heap::replace_key does busy updating of the
	 heap that is unnecessarily expensive.
	 We do lazy increases: after extracting minimum if the key
	 turns out to be out of date, it is re-inserted into heap
	 with correct value.  */
      if (badness < n->get_key ())
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "  decreasing badness %s -> %s, %f to %f\n",
		       edge->caller->dump_name (),
		       edge->callee->dump_name (),
		       n->get_key ().to_double (),
		       badness.to_double ());
	    }
	  heap->decrease_key (n, badness);
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "  enqueuing call %s -> %s, badness %f\n",
		   edge->caller->dump_name (),
		   edge->callee->dump_name (),
		   badness.to_double ());
	}
      edge->aux = heap->insert (badness, edge);
    }
}
/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need reset
   because better context may be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  if (edge_growth_cache != NULL)
    for (edge = where->callers; edge; edge = edge->next_caller)
      if (edge->inline_failed)
	edge_growth_cache->remove (edge);

  FOR_EACH_ALIAS (where, ref)
    reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (edge_growth_cache != NULL && e->inline_failed)
	  edge_growth_cache->remove (e);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES track nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for specified edge that
   it is inlinable.  Otherwise check all edges.  */

static void
update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
		    bitmap updated_nodes,
		    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  struct ipa_ref *ref;

  if ((!node->alias && !ipa_fn_summaries->get (node)->inlinable)
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->get_uid ()))
    return;

  FOR_EACH_ALIAS (node, ref)
    {
      struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
      update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	if (!check_inlinablity_for
	    || check_inlinablity_for == edge)
	  {
	    if (can_inline_edge_p (edge, false)
		&& want_inline_small_function_p (edge, false)
		&& can_inline_edge_by_limits_p (edge, false))
	      update_edge_key (heap, edge);
	    else if (edge->aux)
	      {
		report_inline_failed_reason (edge);
		heap->delete_node ((edge_heap_node_t *) edge->aux);
		edge->aux = NULL;
	      }
	  }
	else if (edge->aux)
	  update_edge_key (heap, edge);
      }
}
/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced new call site) and thus all we need is to insert newly
   created edges into heap.  */

static void
update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	enum availability avail;
	struct cgraph_node *callee;
	/* We do not reset callee growth cache here.  Since we added a new call,
	   growth should have just increased and consequently badness metric
	   don't need updating.  */
	if (e->inline_failed
	    && (callee = e->callee->ultimate_alias_target (&avail, e->caller))
	    && ipa_fn_summaries->get (callee) != NULL
	    && ipa_fn_summaries->get (callee)->inlinable
	    && avail >= AVAIL_AVAILABLE
	    && !bitmap_bit_p (updated_nodes, callee->get_uid ()))
	  {
	    if (can_inline_edge_p (e, false)
		&& want_inline_small_function_p (e, false)
		&& can_inline_edge_by_limits_p (e, false))
	      update_edge_key (heap, e);
	    else if (e->aux)
	      {
		report_inline_failed_reason (e);
		heap->delete_node ((edge_heap_node_t *) e->aux);
		e->aux = NULL;
	      }
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			edge_heap_t *heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
	|| (e->callee->ultimate_alias_target (&avail, e->caller) == node
	    && avail > AVAIL_INTERPOSABLE))
      heap->insert (-e->sreal_frequency (), e);
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
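/* The fibonacci heap used here is a min-heap, so the insertion above keys
   edges by the negated call frequency: the most frequently executed
   recursive call is extracted first by recursive_inlining below.  */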
/* Decide on recursive inlining: in the case function has recursive calls,
   inline until body size reaches given argument.  If any new indirect edges
   are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
   is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
		    vec<cgraph_edge *> *new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  edge_heap_t heap (sreal::min ());
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that function is small enough to be considered for inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  lookup_recursive_calls (node, node, &heap);
  if (heap.empty ())
    return false;

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     node->name ());

  /* Do the inlining and update list of recursive call during process.  */
  while (!heap.empty ())
    {
      struct cgraph_edge *curr = heap.extract_min ();
      struct cgraph_node *cnode, *dest = curr->callee;

      if (!can_inline_edge_p (curr, true)
	  || !can_inline_edge_by_limits_p (curr, true))
	continue;

      /* MASTER_CLONE is produced in the case we already started to modify
	 the function.  Be sure to redirect edge to the original body before
	 estimating growths otherwise we will be seeing growths after inlining
	 the already modified body.  */
      if (master_clone)
	{
	  curr->redirect_callee (master_clone);
	  if (edge_growth_cache != NULL)
	    edge_growth_cache->remove (curr);
	}

      if (estimate_size_after_inlining (node, curr) > limit)
	{
	  curr->redirect_callee (dest);
	  if (edge_growth_cache != NULL)
	    edge_growth_cache->remove (curr);
	  break;
	}

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->decl
	    == curr->callee->ultimate_alias_target ()->decl)
	  depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
	{
	  curr->redirect_callee (dest);
	  if (edge_growth_cache != NULL)
	    edge_growth_cache->remove (curr);
	  continue;
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count.nonzero_p ())
	    {
	      fprintf (dump_file, " called approx. %.2f times per call",
		       (double)curr->count.to_gcov_type ()
		       / node->count.to_gcov_type ());
	    }
	  fprintf (dump_file, "\n");
	}
      if (!master_clone)
	{
	  /* We need original clone to copy around.  */
	  master_clone = node->create_clone (node->decl, node->count,
					     false, vNULL, true, NULL, NULL);
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL);
	  curr->redirect_callee (master_clone);
	  if (edge_growth_cache != NULL)
	    edge_growth_cache->remove (curr);
	}

      inline_call (curr, false, new_edges, &overall_size, true);
      lookup_recursive_calls (node, curr->callee, &heap);
      n++;
    }

  if (!heap.empty () && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  if (!master_clone)
    return false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, edge->call_stmt,
		     "\n   Inlined %i times, "
		     "body grown from size %i to %i, time %f to %f\n", n,
		     ipa_fn_summaries->get (master_clone)->size,
		     ipa_fn_summaries->get (node)->size,
		     ipa_fn_summaries->get (master_clone)->time.to_double (),
		     ipa_fn_summaries->get (node)->time.to_double ());

  /* Remove master clone we used for inlining.  We rely that clones inlined
     into master clone get queued just before master clone so we don't
     need recursion.  */
  for (node = symtab->first_function (); node != master_clone;
       node = next)
    {
      next = symtab->next_function (node);
      if (node->global.inlined_to == master_clone)
	node->remove ();
    }
  master_clone->remove ();
  return true;
}
/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((int64_t) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
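/* Worked example, with hypothetical parameter values: for a unit of
   10000 insns and --param inline-unit-growth at, say, 20 (percent),
       max = 10000 * (100 + 20) / 100 = 12000 insns;
   units smaller than --param large-unit-insns are first rounded up to
   that value, so tiny units may grow relatively more.  */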
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
{
  while (new_edges.length () > 0)
    {
      struct cgraph_edge *edge = new_edges.pop ();

      gcc_assert (!edge->aux);
      gcc_assert (edge->callee);
      if (edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true)
	  && can_inline_edge_by_limits_p (edge, true))
	edge->aux = heap->insert (edge_badness (edge, false), edge);
    }
}
/* Remove EDGE from the fibheap.  */

static void
heap_edge_removal_hook (struct cgraph_edge *e, void *data)
{
  if (e->aux)
    {
      ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
      e->aux = NULL;
    }
}
/* Return true if speculation of edge E seems useful.
   If ANTICIPATE_INLINING is true, be conservative and hope that E
   may get inlined.  */

bool
speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
{
  /* If we have already decided to inline the edge, it seems useful.  */
  if (!e->inline_failed)
    return true;

  enum availability avail;
  struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
								 e->caller);
  struct cgraph_edge *direct, *indirect;
  struct ipa_ref *ref;

  gcc_assert (e->speculative && !e->indirect_unknown_callee);

  if (!e->maybe_hot_p ())
    return false;

  /* See if IP optimizations found something potentially useful about the
     function.  For now we look only for CONST/PURE flags.  Almost everything
     else we propagate is useless.  */
  if (avail >= AVAIL_AVAILABLE)
    {
      int ecf_flags = flags_from_decl_or_type (target->decl);
      if (ecf_flags & ECF_CONST)
	{
	  e->speculative_call_info (direct, indirect, ref);
	  if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
	    return true;
	}
      else if (ecf_flags & ECF_PURE)
	{
	  e->speculative_call_info (direct, indirect, ref);
	  if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
	    return true;
	}
    }
  /* If we did not manage to inline the function nor redirect
     to an ipa-cp clone (that are seen by having local flag set),
     it is probably pointless to inline it unless hardware is missing
     indirect call predictor.  */
  if (!anticipate_inlining && !target->local.local)
    return false;
  /* For overwritable targets there is not much to do.  */
  if (!can_inline_edge_p (e, false)
      || !can_inline_edge_by_limits_p (e, false, true))
    return false;
  /* OK, speculation seems interesting.  */
  return true;
}
/* We know that EDGE is not going to be inlined.
   See if we can remove speculation.  */

static void
resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
{
  if (edge->speculative && !speculation_useful_p (edge, false))
    {
      struct cgraph_node *node = edge->caller;
      struct cgraph_node *where = node->global.inlined_to
				  ? node->global.inlined_to : node;
      auto_bitmap updated_nodes;

      if (edge->count.ipa ().initialized_p ())
	spec_rem += edge->count.ipa ();
      edge->resolve_speculation ();
      reset_edge_caches (where);
      ipa_update_overall_fn_summary (where);
      update_caller_keys (edge_heap, where,
			  updated_nodes, NULL);
      update_callee_keys (edge_heap, where,
			  updated_nodes);
    }
}
/* Return true if NODE should be accounted for overall size estimate.
   Skip all nodes optimized for size so we can measure the growth of hot
   part of program no matter of the padding.  */

bool
inline_account_function_p (struct cgraph_node *node)
{
  return (!DECL_EXTERNAL (node->decl)
	  && !opt_for_fn (node->decl, optimize_size)
	  && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
}
/* Count number of callers of NODE and store it into DATA (that
   points to int).  Worker for cgraph_for_node_and_aliases.  */

static bool
sum_callers (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  int *num_calls = (int *)data;

  for (e = node->callers; e; e = e->next_caller)
    (*num_calls)++;
  return false;
}
/* We only propagate across edges with non-interposable callee.  */

static bool
ignore_edge_p (struct cgraph_edge *e)
{
  enum availability avail;
  e->callee->function_or_virtual_thunk_symbol (&avail, e->caller);
  return (avail <= AVAIL_INTERPOSABLE);
}
1769 /* We use greedy algorithm for inlining of small functions:
1770 All inline candidates are put into prioritized heap ordered in
1773 The inlining of small functions is bounded by unit growth parameters. */
1776 inline_small_functions (void)
1778 struct cgraph_node
*node
;
1779 struct cgraph_edge
*edge
;
1780 edge_heap_t
edge_heap (sreal::min ());
1781 auto_bitmap updated_nodes
;
1782 int min_size
, max_size
;
1783 auto_vec
<cgraph_edge
*> new_indirect_edges
;
1784 int initial_size
= 0;
1785 struct cgraph_node
**order
= XCNEWVEC (cgraph_node
*, symtab
->cgraph_count
);
1786 struct cgraph_edge_hook_list
*edge_removal_hook_holder
;
1787 new_indirect_edges
.create (8);
1789 edge_removal_hook_holder
1790 = symtab
->add_edge_removal_hook (&heap_edge_removal_hook
, &edge_heap
);
1792 /* Compute overall unit size and other global parameters used by badness
1795 max_count
= profile_count::uninitialized ();
1796 ipa_reduced_postorder (order
, true, ignore_edge_p
);
1799 FOR_EACH_DEFINED_FUNCTION (node
)
1800 if (!node
->global
.inlined_to
)
1802 if (!node
->alias
&& node
->analyzed
1803 && (node
->has_gimple_body_p () || node
->thunk
.thunk_p
)
1804 && opt_for_fn (node
->decl
, optimize
))
1806 class ipa_fn_summary
*info
= ipa_fn_summaries
->get (node
);
1807 struct ipa_dfs_info
*dfs
= (struct ipa_dfs_info
*) node
->aux
;
1809 /* Do not account external functions, they will be optimized out
1810 if not inlined. Also only count the non-cold portion of program. */
1811 if (inline_account_function_p (node
))
1812 initial_size
+= info
->size
;
1813 info
->growth
= estimate_growth (node
);
1816 node
->call_for_symbol_and_aliases (sum_callers
, &num_calls
,
1819 info
->single_caller
= true;
1820 if (dfs
&& dfs
->next_cycle
)
1822 struct cgraph_node
*n2
;
1823 int id
= dfs
->scc_no
+ 1;
1825 n2
= ((struct ipa_dfs_info
*) n2
->aux
)->next_cycle
)
1826 if (opt_for_fn (n2
->decl
, optimize
))
1828 ipa_fn_summary
*info2
= ipa_fn_summaries
->get (n2
);
1836 for (edge
= node
->callers
; edge
; edge
= edge
->next_caller
)
1837 max_count
= max_count
.max (edge
->count
.ipa ());
1839 ipa_free_postorder_info ();
1841 = new call_summary
<edge_growth_cache_entry
*> (symtab
, false);
1845 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1848 overall_size
= initial_size
;
1849 max_size
= compute_max_insns (overall_size
);
1850 min_size
= overall_size
;
  /* Populate the heap with all edges we might inline.  */

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      bool update = false;
      struct cgraph_edge *next = NULL;
      bool has_speculative = false;

      if (!opt_for_fn (node->decl, optimize))
	continue;

      if (dump_file)
	fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());

      for (edge = node->callees; edge; edge = next)
	{
	  next = edge->next_callee;
	  if (edge->inline_failed
	      && !edge->aux
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true)
	      && can_inline_edge_by_limits_p (edge, true)
	      && edge->inline_failed)
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (&edge_heap, edge);
	    }
	  if (edge->speculative)
	    has_speculative = true;
	}
      if (has_speculative)
	for (edge = node->callees; edge; edge = next)
	  {
	    next = edge->next_callee;
	    if (edge->speculative
		&& !speculation_useful_p (edge, edge->aux != NULL))
	      {
		edge->resolve_speculation ();
		update = true;
	      }
	  }
      if (update)
	{
	  struct cgraph_node *where = node->global.inlined_to
				      ? node->global.inlined_to : node;
	  ipa_update_overall_fn_summary (where);
	  reset_edge_caches (where);
	  update_caller_keys (&edge_heap, where,
			      updated_nodes, NULL);
	  update_callee_keys (&edge_heap, where,
			      updated_nodes);
	  bitmap_clear (updated_nodes);
	}
    }
  gcc_assert (in_lto_p
	      || !max_count.initialized_p ()
	      || (profile_info && flag_branch_probabilities));
  while (!edge_heap.empty ())
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      sreal badness = edge_heap.min_key ();
      sreal current_badness;
      int growth;

      edge = edge_heap.extract_min ();
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed || !edge->callee->analyzed)
	continue;
      /* Be sure that the caches are maintained consistently.
	 This check is affected by scaling roundoff errors when compiling for
	 IPA; thus we skip it in that case.  */
      if (!edge->callee->count.ipa_p ()
	  && (!max_count.initialized_p () || !max_count.nonzero_p ()))
	{
	  sreal cached_badness = edge_badness (edge, false);

	  int old_size_est = estimate_edge_size (edge);
	  sreal old_time_est = estimate_edge_time (edge);
	  int old_hints_est = estimate_edge_hints (edge);

	  if (edge_growth_cache != NULL)
	    edge_growth_cache->remove (edge);
	  gcc_assert (old_size_est == estimate_edge_size (edge));
	  gcc_assert (old_time_est == estimate_edge_time (edge));
	  /* FIXME:

	     gcc_assert (old_hints_est == estimate_edge_hints (edge));

	     fails with profile feedback because some hints depend on the
	     maybe_hot_edge_p predicate, and because the callee gets inlined
	     into other calls, the edge may become cold.
	     This ought to be fixed by computing relative probabilities for
	     the given invocation, but that will be better done once the whole
	     code is converted to sreals.  Disable for now and revert to the
	     "wrong" value so the enable/disable checking paths agree.  */
	  edge_growth_cache->get (edge)->hints = old_hints_est + 1;

	  /* When updating the edge costs, we only decrease badness in the keys.
	     Increases of badness are handled lazily; when we see a key with an
	     out-of-date value on it, we re-insert it now.  */
	  current_badness = edge_badness (edge, false);
	  gcc_assert (cached_badness == current_badness);
	  gcc_assert (current_badness >= badness);
	}
      else
	current_badness = edge_badness (edge, false);

      if (current_badness != badness)
	{
	  if (edge_heap.min () && current_badness > edge_heap.min_key ())
	    {
	      edge->aux = edge_heap.insert (current_badness, edge);
	      continue;
	    }
	  else
	    badness = current_badness;
	}
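      /* Illustrative pseudocode of the lazy-update protocol above (a sketch
	 with invented helper names, not the GCC API):

	   e = heap.extract_min ();
	   now = compute_badness (e);
	   if (now != key (e) && now > heap.min_key ())
	     heap.insert (now, e);	// stale key; re-insert, retry later
	   else
	     process (e);		// key was still accurate

	 Only increases of badness are deferred this way; decreases update
	 the key eagerly via update_edge_key.  */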
      if (!can_inline_edge_p (edge, true)
	  || !can_inline_edge_by_limits_p (edge, true))
	{
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}
      callee = edge->callee->ultimate_alias_target ();
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s with %i size\n",
		   callee->dump_name (),
		   ipa_fn_summaries->get (callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s in %s:%i\n"
		   " Estimated badness is %f, frequency %.2f.\n",
		   edge->caller->dump_name (),
		   edge->call_stmt
		   && (LOCATION_LOCUS (gimple_location ((const gimple *)
							edge->call_stmt))
		       > BUILTINS_LOCATION)
		   ? gimple_filename ((const gimple *) edge->call_stmt)
		   : "unknown",
		   edge->call_stmt
		   ? gimple_lineno ((const gimple *) edge->call_stmt)
		   : -1,
		   badness.to_double (),
		   edge->sreal_frequency ().to_double ());
	  if (edge->count.ipa ().initialized_p ())
	    {
	      fprintf (dump_file, " Called ");
	      edge->count.ipa ().dump (dump_file);
	      fprintf (dump_file, " times\n");
	    }
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}
      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}
      if (!want_inline_small_function_p (edge, true))
	{
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}
      /* Heuristics for inlining small functions work poorly for
	 recursive calls where we do effects similar to loop unrolling.
	 When inlining such an edge seems profitable, leave the decision to
	 the special-purpose recursive inliner.  */
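      /* For instance (illustrative user code, not from the testsuite):

	   int sum (int *p, int n) { return n ? *p + sum (p + 1, n - 1) : 0; }

	 Inlining the recursive call into sum duplicates the body much like
	 unrolling one iteration of a loop, so the recursive inliner bounds
	 the unrolling depth instead of applying the generic small-function
	 heuristics.  */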
      if (edge->recursive_p ())
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   opt_for_fn (edge->caller->decl,
					       flag_indirect_inlining)
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      resolve_noninline_speculation (&edge_heap, edge);
	      continue;
	    }
	  reset_edge_caches (where);
	  /* Recursive inliner inlines all recursive calls of the function
	     at once.  Consequently we need to update all callee keys.  */
	  if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
	    add_new_edges_to_heap (&edge_heap, new_indirect_edges);
	  update_callee_keys (&edge_heap, where, updated_nodes);
	  bitmap_clear (updated_nodes);
	}
      else
	{
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where self recursive function A is inlined
	     into B.  This is desired optimization in some cases, since it
	     leads to effect similar of loop peeling and we might completely
	     optimize out the recursive call.  However we must be extra
	     selective.  */

	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->decl == callee->decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	    }
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
						     true, depth))
	    {
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      resolve_noninline_speculation (&edge_heap, edge);
	      continue;
	    }
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);

	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size, true);
	  add_new_edges_to_heap (&edge_heap, new_indirect_edges);

	  reset_edge_caches (edge->callee);

	  update_callee_keys (&edge_heap, where, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as the number of inlinable calls and the size of the
	 function body.  After inlining these properties might change
	 for the function we inlined into (since its body size changed)
	 and for the functions called by the function we inlined (since
	 the number of their inlinable callers might change).  */
      update_caller_keys (&edge_heap, where, updated_nodes, NULL);
      /* Offline copy count has possibly changed, recompute if profile is
	 available.  */
      struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
      if (n != edge->callee && n->analyzed
	  && n->count.ipa ().initialized_p ())
	update_callee_keys (&edge_heap, n, updated_nodes);
      bitmap_clear (updated_nodes);
      if (dump_enabled_p ())
	{
	  ipa_fn_summary *s = ipa_fn_summaries->get (edge->caller);

	  /* dump_printf can't handle %+i.  */
	  char buf_net_change[100];
	  snprintf (buf_net_change, sizeof buf_net_change, "%+i",
		    overall_size - old_size);

	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, edge->call_stmt,
			   " Inlined %C into %C which now has time %f and "
			   "size %i, net change of %s.\n",
			   edge->callee, edge->caller,
			   s->time.to_double (), s->size, buf_net_change);
	}
      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }
  free_growth_caches ();
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "Unit growth for small function inlining: %i->%i (%i%%)\n",
		 initial_size, overall_size,
		 initial_size ? overall_size * 100 / initial_size - 100 : 0);
  symtab->remove_edge_removal_hook (edge_removal_hook_holder);
}
/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */
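/* For reference, an illustrative use of the attribute this implements
   (hypothetical user code):

     __attribute__ ((flatten)) int hot_path (int x)
     {
       return helper_a (x) + helper_b (x);
     }

   Everything reachable through calls in hot_path is inlined into its body,
   transitively, except where the checks below (cycles, SSA mismatches,
   uninlinable edges) force us to stop.  */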
static void
flatten_function (struct cgraph_node *node, bool early, bool update)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();

      /* We've hit a cycle?  It is time to give up.  */
      if (callee->aux)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "Not inlining %C into %C to avoid cycle.\n",
			     callee, e->caller);
	  if (cgraph_inline_failed_type (e->inline_failed) != CIF_FINAL_ERROR)
	    e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  flatten_function (callee, early, false);
	  continue;
	}

      /* Flatten attribute needs to be processed during late inlining.  For
	 extra code quality we however do flattening during early optimization,
	 too.  */
      if (!early
	  ? !can_inline_edge_p (e, true)
	    && !can_inline_edge_by_limits_p (e, true)
	  : !can_early_inline_edge_p (e))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "Not inlining: recursive call.\n");
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
			 " Inlining %C into %C.\n",
			 callee, e->caller);
      orig_callee = callee;
      inline_call (e, true, NULL, NULL, false);
      if (e->callee != orig_callee)
	orig_callee->aux = (void *) node;
      flatten_function (e->callee, early, false);
      if (e->callee != orig_callee)
	orig_callee->aux = NULL;
    }

  node->aux = NULL;
  if (update)
    ipa_update_overall_fn_summary (node->global.inlined_to
				   ? node->global.inlined_to : node);
}
/* Inline NODE to all callers.  Worker for cgraph_for_node_and_aliases.
   DATA points to the number of calls originally found so we avoid infinite
   recursion.  */

static bool
inline_to_all_callers_1 (struct cgraph_node *node, void *data,
			 hash_set<cgraph_node *> *callers)
{
  int *num_calls = (int *) data;
  bool callee_removed = false;

  while (node->callers && !node->global.inlined_to)
    {
      struct cgraph_node *caller = node->callers->caller;

      if (!can_inline_edge_p (node->callers, true)
	  || !can_inline_edge_by_limits_p (node->callers, true)
	  || node->callers->recursive_p ())
	{
	  if (dump_file)
	    fprintf (dump_file, "Uninlinable call found; giving up.\n");
	  *num_calls = 0;
	  return false;
	}

      if (dump_file)
	{
	  cgraph_node *ultimate = node->ultimate_alias_target ();
	  fprintf (dump_file,
		   "\nInlining %s size %i.\n",
		   ultimate->name (),
		   ipa_fn_summaries->get (ultimate)->size);
	  fprintf (dump_file,
		   " Called once from %s %i insns.\n",
		   node->callers->caller->name (),
		   ipa_fn_summaries->get (node->callers->caller)->size);
	}

      /* Remember which callers we inlined to, delaying updating the
	 overall summary.  */
      callers->add (node->callers->caller);
      inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
      if (dump_file)
	fprintf (dump_file,
		 " Inlined into %s which now has %i size\n",
		 caller->name (),
		 ipa_fn_summaries->get (caller)->size);
      if (!(*num_calls)--)
	{
	  if (dump_file)
	    fprintf (dump_file, "New calls found; giving up.\n");
	  return callee_removed;
	}
      if (callee_removed)
	return true;
    }
  return false;
}
/* Wrapper around inline_to_all_callers_1 doing delayed overall summary
   update.  */

static bool
inline_to_all_callers (struct cgraph_node *node, void *data)
{
  hash_set<cgraph_node *> callers;
  bool res = inline_to_all_callers_1 (node, data, &callers);
  /* Perform the delayed update of the overall summary of all callers
     processed.  This avoids quadratic behavior in the cases where
     we have a lot of calls to the same function.  */
  for (hash_set<cgraph_node *>::iterator i = callers.begin ();
       i != callers.end (); ++i)
    ipa_update_overall_fn_summary (*i);
  return res;
}
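/* A worked example of why the delayed update matters (hypothetical numbers):
   with 1000 calls to NODE from a single caller, updating the caller's
   overall summary after each of the 1000 inline_call operations would rescan
   the caller's body 1000 times, i.e. quadratic work overall; collecting
   callers in the hash_set first costs exactly one rescan per distinct
   caller.  */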
/* Output overall time estimate.  */

static void
dump_overall_stats (void)
{
  sreal sum_weighted = 0, sum = 0;
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to
	&& !node->alias)
      {
	ipa_fn_summary *s = ipa_fn_summaries->get (node);
	sum += s->time;
	if (node->count.ipa ().initialized_p ())
	  sum_weighted += s->time * node->count.ipa ().to_gcov_type ();
      }
  fprintf (dump_file, "Overall time estimate: "
	   "%f weighted by profile: "
	   "%f\n", sum.to_double (), sum_weighted.to_double ());
}
/* Output some useful stats about inlining.  */

static void
dump_inline_stats (void)
{
  int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
  int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
  int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
  int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
  int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
  int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
  int64_t reason[CIF_N_REASONS][2];
  sreal reason_freq[CIF_N_REASONS];
  int i;
  struct cgraph_node *node;

  memset (reason, 0, sizeof (reason));
  for (i = 0; i < CIF_N_REASONS; i++)
    reason_freq[i] = 0;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
	{
	  if (e->inline_failed)
	    {
	      if (e->count.ipa ().initialized_p ())
		reason[(int) e->inline_failed][0]
		  += e->count.ipa ().to_gcov_type ();
	      reason_freq[(int) e->inline_failed] += e->sreal_frequency ();
	      reason[(int) e->inline_failed][1]++;
	      if (DECL_VIRTUAL_P (e->callee->decl)
		  && e->count.ipa ().initialized_p ())
		{
		  if (e->indirect_inlining_edge)
		    noninlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
		  else
		    noninlined_virt_cnt += e->count.ipa ().to_gcov_type ();
		}
	      else if (e->count.ipa ().initialized_p ())
		{
		  if (e->indirect_inlining_edge)
		    noninlined_indir_cnt += e->count.ipa ().to_gcov_type ();
		  else
		    noninlined_cnt += e->count.ipa ().to_gcov_type ();
		}
	    }
	  else if (e->count.ipa ().initialized_p ())
	    {
	      if (e->speculative)
		{
		  if (DECL_VIRTUAL_P (e->callee->decl))
		    inlined_speculative_ply += e->count.ipa ().to_gcov_type ();
		  else
		    inlined_speculative += e->count.ipa ().to_gcov_type ();
		}
	      else if (DECL_VIRTUAL_P (e->callee->decl))
		{
		  if (e->indirect_inlining_edge)
		    inlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
		  else
		    inlined_virt_cnt += e->count.ipa ().to_gcov_type ();
		}
	      else
		{
		  if (e->indirect_inlining_edge)
		    inlined_indir_cnt += e->count.ipa ().to_gcov_type ();
		  else
		    inlined_cnt += e->count.ipa ().to_gcov_type ();
		}
	    }
	}
      for (e = node->indirect_calls; e; e = e->next_callee)
	if (e->indirect_info->polymorphic
	    && e->count.ipa ().initialized_p ())
	  indirect_poly_cnt += e->count.ipa ().to_gcov_type ();
	else if (e->count.ipa ().initialized_p ())
	  indirect_cnt += e->count.ipa ().to_gcov_type ();
    }
  if (max_count.initialized_p ())
    {
      fprintf (dump_file,
	       "Inlined %" PRId64 " + speculative "
	       "%" PRId64 " + speculative polymorphic "
	       "%" PRId64 " + previously indirect "
	       "%" PRId64 " + virtual "
	       "%" PRId64 " + virtual and previously indirect "
	       "%" PRId64 "\n" "Not inlined "
	       "%" PRId64 " + previously indirect "
	       "%" PRId64 " + virtual "
	       "%" PRId64 " + virtual and previously indirect "
	       "%" PRId64 " + still indirect "
	       "%" PRId64 " + still indirect polymorphic "
	       "%" PRId64 "\n", inlined_cnt,
	       inlined_speculative, inlined_speculative_ply,
	       inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
	       noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
	       noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
      fprintf (dump_file, "Removed speculations ");
      spec_rem.dump (dump_file);
      fprintf (dump_file, "\n");
    }
  dump_overall_stats ();
  fprintf (dump_file, "\nWhy inlining failed?\n");
  for (i = 0; i < CIF_N_REASONS; i++)
    if (reason[i][1])
      fprintf (dump_file, "%-50s: %8i calls, %8f freq, %" PRId64 " count\n",
	       cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
	       (int) reason[i][1], reason_freq[i].to_double (), reason[i][0]);
}
/* Called when node is removed.  */

static void
flatten_remove_node_hook (struct cgraph_node *node, void *data)
{
  if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) == NULL)
    return;

  hash_set<struct cgraph_node *> *removed
    = (hash_set<struct cgraph_node *> *) data;
  removed->add (node);
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order;
  int i, j;
  int cold;
  bool remove_functions = false;

  order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);

  if (dump_file)
    ipa_dump_fn_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);
  spec_rem = profile_count::zero ();

  FOR_EACH_FUNCTION (node)
    {
      node->aux = 0;

      /* Recompute the default reasons for inlining because they may have
	 changed during merging.  */
      if (in_lto_p)
	{
	  for (cgraph_edge *e = node->callees; e; e = e->next_callee)
	    {
	      gcc_assert (e->inline_failed);
	      initialize_inline_failed (e);
	    }
	  for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
	    initialize_inline_failed (e);
	}
    }

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* First shrink order array, so that it only contains nodes with
     flatten attribute.  */
  for (i = nnodes - 1, j = i; i >= 0; i--)
    {
      node = order[i];
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->decl)) != NULL)
	order[j--] = order[i];
    }

  /* After the above loop, order[j + 1] ... order[nnodes - 1] contain
     nodes with flatten attribute.  If there is more than one such
     node, we need to register a node removal hook, as flatten_function
     could remove other nodes with flatten attribute.  See PR82801.  */
  struct cgraph_node_hook_list *node_removal_hook_holder = NULL;
  hash_set<struct cgraph_node *> *flatten_removed_nodes = NULL;
  if (j < nnodes - 2)
    {
      flatten_removed_nodes = new hash_set<struct cgraph_node *>;
      node_removal_hook_holder
	= symtab->add_cgraph_removal_hook (&flatten_remove_node_hook,
					   flatten_removed_nodes);
    }

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i > j; i--)
    {
      node = order[i];
      if (flatten_removed_nodes
	  && flatten_removed_nodes->contains (node))
	continue;

      /* Handle nodes to be flattened.
	 Ideally when processing callees we stop inlining at the
	 entry of cycles, possibly cloning that entry point and
	 try to flatten itself turning it into a self-recursive
	 function.  */
      if (dump_file)
	fprintf (dump_file, "Flattening %s\n", node->name ());
      flatten_function (node, false, true);
    }

  if (j < nnodes - 2)
    {
      symtab->remove_cgraph_removal_hook (node_removal_hook_holder);
      delete flatten_removed_nodes;
    }
  free (order);

  if (dump_file)
    dump_overall_stats ();

  inline_small_functions ();

  gcc_assert (symtab->state == IPA_SSA);
  symtab->state = IPA_SSA_AFTER_INLINING;
  /* Do first after-inlining removal.  We want to remove all "stale" extern
     inline functions and virtual functions so we really know what is called
     once.  */
  symtab->remove_unreachable_nodes (dump_file);
  /* Inline functions with a property that after inlining into all callers the
     code size will shrink because the out-of-line copy is eliminated.
     We do this regardless of the callee size as long as function growth
     limits are met.  */
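  /* A back-of-the-envelope example of this property (hypothetical sizes): a
     static function of size 40 with a single remaining caller adds at most
     40 units when inlined while eliminating the 40-unit offline body plus
     the call sequence, so the net unit size change is never positive.  With
     two callers the inlined copies cost 80 against the one 40-unit body, so
     the same bet is no longer automatic.  */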
2565 "\nDeciding on functions to be inlined into all callers and "
2566 "removing useless speculations:\n");
  /* Inlining one function called once has good chance of preventing
     inlining other function into the same callee.  Ideally we should
     work in priority order, but probably inlining hot functions first
     is a good cut without the extra pain of maintaining the queue.

     ??? this is not really fitting the bill perfectly: inlining function
     into callee often leads to better optimization of callee due to
     increased context for optimization.
     For example if main() function calls a function that outputs help
     and then a function that does the main optimization, we should inline
     the second with priority even if both calls are cold by themselves.

     We probably want to implement new predicate replacing our use of
     maybe_hot_edge interpreted as maybe_hot_edge || callee is known
     to be hot.  */
  for (cold = 0; cold <= 1; cold++)
    {
      FOR_EACH_DEFINED_FUNCTION (node)
	{
	  struct cgraph_edge *edge, *next;
	  bool update = false;

	  if (!opt_for_fn (node->decl, optimize)
	      || !opt_for_fn (node->decl, flag_inline_functions_called_once))
	    continue;

	  for (edge = node->callees; edge; edge = next)
	    {
	      next = edge->next_callee;
	      if (edge->speculative && !speculation_useful_p (edge, false))
		{
		  if (edge->count.ipa ().initialized_p ())
		    spec_rem += edge->count.ipa ();
		  edge->resolve_speculation ();
		  update = true;
		  remove_functions = true;
		}
	    }
	  if (update)
	    {
	      struct cgraph_node *where = node->global.inlined_to
					  ? node->global.inlined_to : node;
	      reset_edge_caches (where);
	      ipa_update_overall_fn_summary (where);
	    }
	  if (want_inline_function_to_all_callers_p (node, cold))
	    {
	      int num_calls = 0;
	      node->call_for_symbol_and_aliases (sum_callers, &num_calls,
						 true);
	      while (node->call_for_symbol_and_aliases
		       (inline_to_all_callers, &num_calls, true))
		;
	      remove_functions = true;
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  ipa_free_all_structures_after_iinln ();

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "\nInlined %i calls, eliminated %i functions\n\n",
		 ncalls_inlined, nfunctions_inlined);
  if (dump_file)
    dump_inline_stats ();

  if (dump_file)
    ipa_dump_fn_summaries (dump_file);
  return remove_functions ? TODO_remove_functions : 0;
}
/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();
      if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  Not inlining recursive call to %C.\n",
			     e->callee);
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      if (!can_early_inline_edge_p (e))
	{
	  /* Set inlined to true if the callee is marked "always_inline" but
	     is not inlinable.  This will allow flagging an error later in
	     expand_call_inline in tree-inline.c.  */
	  if (lookup_attribute ("always_inline",
				DECL_ATTRIBUTES (callee->decl)) != NULL)
	    inlined = true;
	  continue;
	}

      if (dump_enabled_p ())
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
			 "  Inlining %C into %C (always_inline).\n",
			 e->callee, e->caller);
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }
  if (inlined)
    ipa_update_overall_fn_summary (node);

  return inlined;
}
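/* For reference, an illustrative always_inline use this function serves
   (hypothetical user code):

     static inline int clamp (int x) __attribute__ ((always_inline));
     static inline int clamp (int x) { return x < 0 ? 0 : x; }

   Calls to clamp are inlined even at -O0; when a call cannot be inlined at
   all, the inlined flag set above lets expand_call_inline in tree-inline.c
   report an error instead of silently emitting a call.  */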
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();

      /* We can encounter not-yet-analyzed function during
	 early inlining on callgraphs with strongly
	 connected components.  */
      ipa_fn_summary *s = ipa_fn_summaries->get (callee);
      if (s == NULL || !s->inlinable || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->decl)
	  && !opt_for_fn (node->decl, flag_inline_small_functions)
	  && !opt_for_fn (node->decl, flag_inline_functions))
	continue;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, e->call_stmt,
			 "Considering inline candidate %C.\n",
			 callee);

      if (!can_early_inline_edge_p (e))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
			 " Inlining %C into %C.\n",
			 callee, e->caller);
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }

  if (inlined)
    ipa_update_overall_fn_summary (node);

  return inlined;
}
static unsigned int
early_inliner (function *fun)
{
  struct cgraph_node *node = cgraph_node::get (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if datastructures for ipa-inliner are already computed.  This
     happens when some pass decides to construct new function and
     cgraph_add_new_function calls lowering passes and early optimization on
     it.  This may confuse us when the early inliner decides to inline a call
     to a function clone, because function clones don't have parameter list
     in ipa-prop matching their signature.  */
  if (ipa_node_params_sum)
    return 0;

  if (flag_checking)
    node->verify ();
  node->remove_all_references ();

  /* Even when not optimizing or not inlining inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks as functions calling
	 always inline functions will get less optimized, but at the
	 same time inlining of functions calling always inline
	 function into an always inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of inlining.  */
      || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
	  && lookup_attribute ("always_inline",
			       DECL_ATTRIBUTES (node->decl))))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_enabled_p ())
	dump_printf (MSG_OPTIMIZED_LOCATIONS,
		     "Flattening %C\n", node);
      flatten_function (node, true, true);
      inlined = true;
    }
  else
    {
      /* If some always_inline functions were inlined, apply the changes.
	 This way we will not account always inline into growth limits and
	 moreover we will inline calls from always inlines that we skipped
	 previously because of conditional above.  */
      if (inlined)
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);
	  /* optimize_inline_calls call above might have introduced new
	     statements that don't have inline parameters computed.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      /* We can encounter not-yet-analyzed function during
		 early inlining on callgraphs with strongly
		 connected components.  */
	      ipa_call_summary *es = ipa_call_summaries->get_create (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	    }
	  ipa_update_overall_fn_summary (node);
	  inlined = false;
	  timevar_pop (TV_INTEGRATION);
	}
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
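      /* For instance (an illustrative sketch, not from the testsuite):

	   static int add1 (int x) { return x + 1; }
	   static int apply (int (*fn) (int), int x) { return fn (x); }
	   int f (int x) { return apply (add1, x); }

	 Inlining apply into f turns the indirect call fn (x) into the direct
	 call add1 (x), which the next iteration can then inline.  */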
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of early inliner works as expected.  We however have
	     values approximately right and thus we only need to update edge
	     info that might be cleared out for newly discovered edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      /* We have no summary for new bound store calls yet.  */
	      ipa_call_summary *es = ipa_call_summaries->get_create (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);

	      if (edge->callee->decl
		  && !gimple_check_call_matching_types (
		      edge->call_stmt, edge->callee->decl, false))
		{
		  edge->inline_failed = CIF_MISMATCHED_ARGUMENTS;
		  edge->call_stmt_cannot_inline_p = true;
		}
	    }
	  if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
	    ipa_update_overall_fn_summary (node);
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  fun->always_inline_functions_inlined = true;

  return todo;
}
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   later real inlining pass for testcases with very many function calls.  */

namespace {

const pass_data pass_data_early_inline =
{
  GIMPLE_PASS, /* type */
  "einline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_EARLY_INLINING, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_early_inline : public gimple_opt_pass
{
public:
  pass_early_inline (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_inline, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_early_inline

unsigned int
pass_early_inline::execute (function *fun)
{
  return early_inliner (fun);
}

} // anon namespace

gimple_opt_pass *
make_pass_early_inline (gcc::context *ctxt)
{
  return new pass_early_inline (ctxt);
}
namespace {

const pass_data pass_data_ipa_inline =
{
  IPA_PASS, /* type */
  "inline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_IPA_INLINING, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_dump_symtab ), /* todo_flags_finish */
};

class pass_ipa_inline : public ipa_opt_pass_d
{
public:
  pass_ipa_inline (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
		      NULL, /* generate_summary */
		      NULL, /* write_summary */
		      NULL, /* read_summary */
		      NULL, /* write_optimization_summary */
		      NULL, /* read_optimization_summary */
		      NULL, /* stmt_fixup */
		      0, /* function_transform_todo_flags_start */
		      inline_transform, /* function_transform */
		      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return ipa_inline (); }

}; // class pass_ipa_inline

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_inline (gcc::context *ctxt)
{
  return new pass_ipa_inline (ctxt);
}