gcc/ipa-inline.c

/* Inlining decision heuristics.
   Copyright (C) 2003-2021 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

      can_inline_edge_p allows checking that a particular inlining is allowed
      by the limits specified by the user (allowed function growth, stack
      growth and so on).

      Functions are inlined when it is obvious the result is profitable (such
      as functions called once or when inlining reduces code size).
      In addition to that we perform inlining of small functions and recursive
      inlining.

   inlining heuristics

      The inliner itself is split into two passes:

      pass_early_inlining

	A simple local inlining pass inlining callees into the current
	function.  This pass makes no use of whole unit analysis and thus it
	can only make very simple decisions based on local properties.

	The strength of the pass is that it is run in topological order
	(reverse postorder) on the callgraph.  Functions are converted into
	SSA form just before this pass and optimized subsequently.  As a
	result, the callees of the function seen by the early inliner were
	already optimized and the results of early inlining add a lot of
	optimization opportunities for the local optimization.

	The pass handles the obvious inlining decisions within the
	compilation unit - inlining auto inline functions, inlining for size
	and flattening.

	The main strength of the pass is the ability to eliminate abstraction
	penalty in C++ code (via a combination of inlining and early
	optimization) and thus improve the quality of analysis done by the
	real IPA optimizers.

	Because of the lack of whole unit knowledge, the pass cannot really
	make good code size/performance tradeoffs.  It however does very
	simple speculative inlining allowing code size to grow by
	EARLY_INLINING_INSNS when the callee is a leaf function.  In this
	case the optimizations performed later are very likely to eliminate
	the cost.

      pass_ipa_inline

	This is the real inliner able to handle inlining with whole program
	knowledge.  It performs the following steps:

	1) Inlining of small functions.  This is implemented by a greedy
	algorithm ordering all inlinable cgraph edges by their badness and
	inlining them in this order as long as the inline limits allow
	doing so.

	This heuristic is not very good at inlining recursive calls.
	Recursive calls can be inlined with results similar to loop
	unrolling.  To do so, a special purpose recursive inliner is executed
	on the function when a recursive edge is met as a viable candidate.

	2) Unreachable functions are removed from the callgraph.  Inlining
	leads to devirtualization and other modifications of the callgraph,
	so functions may become unreachable during the process.  Also
	functions declared as extern inline or virtual functions are removed,
	since after inlining we no longer need the offline bodies.

	3) Functions called once and not exported from the unit are inlined.
	This should almost always lead to a reduction of code size by
	eliminating the need for an offline copy of the function.  */

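/* Illustrative sketch of case 3 above (hypothetical code, not part of
   this file):

     static int helper (int x) { return x * 3; }
     int entry (int x) { return helper (x) + 1; }

   HELPER is static and called exactly once, so inlining it into ENTRY
   and discarding the offline body reduces unit size even though
   inlining normally grows code.  */
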
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "trans-mem.h"
#include "calls.h"
#include "tree-inline.h"
#include "profile.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "auto-profile.h"
#include "builtins.h"
#include "fibonacci_heap.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"

typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;

/* Statistics we collect about the inlining algorithm.  */
static int overall_size;
static profile_count max_count;
static profile_count spec_rem;

/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into a tiny wrapper, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */

static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee->ultimate_alias_target ();
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  ipa_size_summary *outer_info = ipa_size_summaries->get (to);

  /* Look for the function e->caller is inlined into.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent the compiler from exploding".  */
  while (true)
    {
      ipa_size_summary *size_info = ipa_size_summaries->get (to);
      if (limit < size_info->self_size)
	limit = size_info->self_size;
      if (stack_size_limit < size_info->estimated_self_stack_size)
	stack_size_limit = size_info->estimated_self_stack_size;
      if (to->inlined_to)
	to = to->callers->caller;
      else
	break;
    }

  ipa_fn_summary *what_info = ipa_fn_summaries->get (what);
  ipa_size_summary *what_size_info = ipa_size_summaries->get (what);

  if (limit < what_size_info->self_size)
    limit = what_size_info->self_size;

  limit += limit * opt_for_fn (to->decl, param_large_function_growth) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= ipa_size_summaries->get (what)->size
      && newsize > opt_for_fn (to->decl, param_large_function_insns)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
		       * opt_for_fn (to->decl, param_stack_frame_growth)
		       / 100);

  inlined_stack = (ipa_get_stack_frame_offset (to)
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > ipa_fn_summaries->get (to)->estimated_stack_size
      && inlined_stack > opt_for_fn (to->decl, param_large_stack_frame))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

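/* Illustrative numbers (hypothetical, not GCC defaults): if the largest
   function on the inline chain has a self size of 400 insns and
   param_large_function_growth is 100, the cap above becomes
   400 + 400 * 100 / 100 = 800 insns; an estimated post-inline size of
   850 would then fail with CIF_LARGE_FUNCTION_GROWTH_LIMIT, provided it
   also exceeds param_large_function_insns.  */
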
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
		       "  not inlinable: %C -> %C, %s\n",
		       e->caller, e->callee,
		       cgraph_inline_failed_string (e->inline_failed));
      if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
	   || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
	  && e->caller->lto_file_data
	  && e->callee->ultimate_alias_target ()->lto_file_data)
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			   "  LTO objects: %s, %s\n",
			   e->caller->lto_file_data->file_name,
			   e->callee->ultimate_alias_target ()->lto_file_data->file_name);
	}
      if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
	if (dump_file)
	  cl_target_option_print_diff
	    (dump_file, 2, target_opts_for_fn (e->caller->decl),
	     target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
      if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
	if (dump_file)
	  cl_optimization_print_diff
	    (dump_file, 2, opts_for_fn (e->caller->decl),
	     opts_for_fn (e->callee->ultimate_alias_target ()->decl));
    }
}

/* Decide whether sanitizer-related attributes allow inlining.  */

static bool
sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
{
  if (!caller || !callee)
    return true;

  /* Follow clang and allow inlining for always_inline functions.  */
  if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
    return true;

  const sanitize_code codes[] =
    {
      SANITIZE_ADDRESS,
      SANITIZE_THREAD,
      SANITIZE_UNDEFINED,
      SANITIZE_UNDEFINED_NONDEFAULT,
      SANITIZE_POINTER_COMPARE,
      SANITIZE_POINTER_SUBTRACT
    };

  for (unsigned i = 0; i < sizeof (codes) / sizeof (codes[0]); i++)
    if (sanitize_flags_p (codes[i], caller)
	!= sanitize_flags_p (codes[i], callee))
      return false;

  if (sanitize_coverage_p (caller) != sanitize_coverage_p (callee))
    return false;

  return true;
}

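/* Illustrative scenario (hypothetical code): if G is compiled with
   -fsanitize=address while F carries
   __attribute__ ((no_sanitize ("address"))), SANITIZE_ADDRESS differs
   between the two and the predicate above returns false, so a call
   from G to F fails with CIF_SANITIZE_ATTRIBUTE_MISMATCH unless F is
   also declared always_inline.  */
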
/* Used for flags where it is safe to inline when caller's value is
   greater than callee's.  */
#define check_maybe_up(flag) \
  (opts_for_fn (caller->decl)->x_##flag \
   != opts_for_fn (callee->decl)->x_##flag \
   && (!always_inline \
       || opts_for_fn (caller->decl)->x_##flag \
	  < opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where it is safe to inline when caller's value is
   smaller than callee's.  */
#define check_maybe_down(flag) \
  (opts_for_fn (caller->decl)->x_##flag \
   != opts_for_fn (callee->decl)->x_##flag \
   && (!always_inline \
       || opts_for_fn (caller->decl)->x_##flag \
	  > opts_for_fn (callee->decl)->x_##flag))
/* Used for flags where exact match is needed for correctness.  */
#define check_match(flag) \
  (opts_for_fn (caller->decl)->x_##flag \
   != opts_for_fn (callee->decl)->x_##flag)

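/* Reading the macros (illustrative): check_maybe_up (flag_trapping_math)
   evaluates true - vetoing the inline - whenever the flag differs
   between caller and callee; for an always_inline callee the mismatch
   is tolerated if the caller's value is the greater (safe) one.  So a
   caller built with -fno-trapping-math calling an always_inline callee
   built with -ftrapping-math is still refused, while the opposite
   direction is allowed.  */
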
/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report,
		   bool early = false)
{
  gcc_checking_assert (e->inline_failed);

  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    {
      if (report)
	report_inline_failed_reason (e);
      return false;
    }

  bool inlinable = true;
  enum availability avail;
  cgraph_node *caller = (e->caller->inlined_to
			 ? e->caller->inlined_to : e->caller);
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);

  if (!callee->definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  if (!early && (!opt_for_fn (callee->decl, optimize)
		 || !opt_for_fn (caller->decl, optimize)))
    {
      e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
      inlinable = false;
    }
  else if (callee->calls_comdat_local)
    {
      e->inline_failed = CIF_USES_COMDAT_LOCAL;
      inlinable = false;
    }
  else if (avail <= AVAIL_INTERPOSABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  /* All edges with call_stmt_cannot_inline_p should have inline_failed
     initialized to one of the FINAL_ERROR reasons.  */
  else if (e->call_stmt_cannot_inline_p)
    gcc_unreachable ();
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (caller->decl)
	   && DECL_FUNCTION_PERSONALITY (callee->decl)
	   && (DECL_FUNCTION_PERSONALITY (caller->decl)
	       != DECL_FUNCTION_PERSONALITY (callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (caller->decl,
						callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  else if (ipa_fn_summaries->get (callee) == NULL
	   || !ipa_fn_summaries->get (callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  /* Don't inline a function with mismatched sanitization attributes.  */
  else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
    {
      e->inline_failed = CIF_SANITIZE_ATTRIBUTE_MISMATCH;
      inlinable = false;
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}

/* Return the inline_insns_single limit for function N.  If HINT or HINT2
   is true, scale up the bound.  */

static int
inline_insns_single (cgraph_node *n, bool hint, bool hint2)
{
  if (hint && hint2)
    {
      int64_t spd = opt_for_fn (n->decl, param_inline_heuristics_hint_percent);
      spd = spd * spd;
      if (spd > 1000000)
	spd = 1000000;
      return opt_for_fn (n->decl, param_max_inline_insns_single) * spd / 100;
    }
  if (hint || hint2)
    return opt_for_fn (n->decl, param_max_inline_insns_single)
	   * opt_for_fn (n->decl, param_inline_heuristics_hint_percent) / 100;
  return opt_for_fn (n->decl, param_max_inline_insns_single);
}

/* Return the inline_insns_auto limit for function N.  If HINT or HINT2
   is true, scale up the bound.  */

static int
inline_insns_auto (cgraph_node *n, bool hint, bool hint2)
{
  int max_inline_insns_auto = opt_for_fn (n->decl, param_max_inline_insns_auto);
  if (hint && hint2)
    {
      int64_t spd = opt_for_fn (n->decl, param_inline_heuristics_hint_percent);
      spd = spd * spd;
      if (spd > 1000000)
	spd = 1000000;
      return max_inline_insns_auto * spd / 100;
    }
  if (hint || hint2)
    return max_inline_insns_auto
	   * opt_for_fn (n->decl, param_inline_heuristics_hint_percent) / 100;
  return max_inline_insns_auto;
}

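/* Illustrative arithmetic (hypothetical parameter values): with
   param_max_inline_insns_single = 200 and
   param_inline_heuristics_hint_percent = 200, a single matching hint
   raises the bound to 200 * 200 / 100 = 400 insns; when both hint
   groups match, the percentage is squared (and capped) before being
   applied, scaling the bound further still.  */
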
/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */

static bool
can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
			     bool disregard_limits = false, bool early = false)
{
  gcc_checking_assert (e->inline_failed);

  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    {
      if (report)
	report_inline_failed_reason (e);
      return false;
    }

  bool inlinable = true;
  enum availability avail;
  cgraph_node *caller = (e->caller->inlined_to
			 ? e->caller->inlined_to : e->caller);
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;

  /* Check if caller growth allows the inlining.  */
  if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
      && !disregard_limits
      && !lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (caller->decl))
      && !caller_growth_limits (e))
    inlinable = false;
  else if (callee->externally_visible
	   && !DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	   && flag_live_patching == LIVE_PATCHING_INLINE_ONLY_STATIC)
    {
      e->inline_failed = CIF_EXTERN_LIVE_ONLY_STATIC;
      inlinable = false;
    }
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of
     handling the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      bool always_inline =
	     (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	      && lookup_attribute ("always_inline",
				   DECL_ATTRIBUTES (callee->decl)));
      ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
      ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);

      /* Until GCC 4.9 we did not check the semantics-altering flags
	 below and inlined across optimization boundaries.
	 Enabling the checks below breaks several packages by refusing
	 to inline library always_inline functions.  See PR65873.
	 Disable the check for early inlining for now until a better
	 solution is found.  */
      if (always_inline && early)
	;
      /* There are some options that change IL semantics which means
	 we cannot inline in these cases for correctness reasons.
	 Not even for always_inline declared functions.  */
      else if (check_match (flag_wrapv)
	       || check_match (flag_trapv)
	       || check_match (flag_pcc_struct_return)
	       || check_maybe_down (optimize_debug)
	       /* When caller or callee does FP math, be sure the FP codegen
		  flags are compatible.  */
	       || ((caller_info->fp_expressions && callee_info->fp_expressions)
		   && (check_maybe_up (flag_rounding_math)
		       || check_maybe_up (flag_trapping_math)
		       || check_maybe_down (flag_unsafe_math_optimizations)
		       || check_maybe_down (flag_finite_math_only)
		       || check_maybe_up (flag_signaling_nans)
		       || check_maybe_down (flag_cx_limited_range)
		       || check_maybe_up (flag_signed_zeros)
		       || check_maybe_down (flag_associative_math)
		       || check_maybe_down (flag_reciprocal_math)
		       || check_maybe_down (flag_fp_int_builtin_inexact)
		       /* Strictly speaking only when the callee contains
			  function calls that may end up setting errno.  */
		       || check_maybe_up (flag_errno_math)))
	       /* We do not want to make code compiled with exceptions to be
		  brought into a non-EH function unless we know that the callee
		  does not throw.
		  This is tracked by DECL_FUNCTION_PERSONALITY.  */
	       || (check_maybe_up (flag_non_call_exceptions)
		   && DECL_FUNCTION_PERSONALITY (callee->decl))
	       || (check_maybe_up (flag_exceptions)
		   && DECL_FUNCTION_PERSONALITY (callee->decl))
	       /* When devirtualization is disabled for the callee, it is not
		  safe to inline it as we possibly mangled the type info.
		  Allow early inlining of always inlines.  */
	       || (!early && check_maybe_down (flag_devirtualize)))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
      /* gcc.dg/pr43564.c.  Apply user-forced inline even at -O0.  */
      else if (always_inline)
	;
      /* When the user added an attribute to the callee honor it.  */
      else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
	       && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
      /* If explicit optimize attributes are not used, the mismatch is caused
	 by different command line options used to build different units.
	 Do not care about COMDAT functions - those are intended to be
	 optimized with the optimization flags of the module they are used in.
	 Also do not care about mixing up size/speed optimization when
	 DECL_DISREGARD_INLINE_LIMITS is set.  */
      else if ((callee->merged_comdat
		&& !lookup_attribute ("optimize",
				      DECL_ATTRIBUTES (caller->decl)))
	       || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	;
      /* If the mismatch is caused by merging two LTO units with different
	 optimization flags we want to be a bit nicer.  However never inline
	 if one of the functions is not optimized at all.  */
      else if (!opt_for_fn (callee->decl, optimize)
	       || !opt_for_fn (caller->decl, optimize))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
      /* If the callee is optimized for size and the caller is not, allow
	 inlining if the code shrinks or we are within the
	 param_max_inline_insns_single limit and the callee is inline (and
	 thus likely a unified comdat).  This will allow the caller to run
	 faster.  */
      else if (opt_for_fn (callee->decl, optimize_size)
	       > opt_for_fn (caller->decl, optimize_size))
	{
	  int growth = estimate_edge_growth (e);
	  if (growth > opt_for_fn (caller->decl, param_max_inline_insns_size)
	      && (!DECL_DECLARED_INLINE_P (callee->decl)
		  && growth >= MAX (inline_insns_single (caller, false, false),
				    inline_insns_auto (caller, false, false))))
	    {
	      e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	      inlinable = false;
	    }
	}
      /* If the callee is more aggressively optimized for performance than
	 the caller, we generally want to inline only cheap (runtime wise)
	 functions.  */
      else if (opt_for_fn (callee->decl, optimize_size)
	       < opt_for_fn (caller->decl, optimize_size)
	       || (opt_for_fn (callee->decl, optimize)
		   > opt_for_fn (caller->decl, optimize)))
	{
	  if (estimate_edge_time (e)
	      >= 20 + ipa_call_summaries->get (e)->call_stmt_time)
	    {
	      e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	      inlinable = false;
	    }
	}
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}

/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  cgraph_node *caller = (e->caller->inlined_to
			 ? e->caller->inlined_to : e->caller);
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  /* Early inliner might get called at WPA stage when IPA pass adds new
     function.  In this case we cannot really do any of early inlining
     because function bodies are missing.  */
  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    return false;
  if (!gimple_has_body_p (callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In early inliner some of callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			 "  edge not inlinable: not in SSA form\n");
      return false;
    }
  else if (profile_arc_flag
	   && ((lookup_attribute ("no_profile_instrument_function",
				  DECL_ATTRIBUTES (caller->decl)) == NULL_TREE)
	       != (lookup_attribute ("no_profile_instrument_function",
				     DECL_ATTRIBUTES (callee->decl)) == NULL_TREE)))
    return false;

  if (!can_inline_edge_p (e, true, true)
      || !can_inline_edge_by_limits_p (e, true, false, true))
    return false;
  return true;
}

/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      num++;
  return num;
}

/* Return true if we are interested in inlining the small function.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  /* For AutoFDO, we need to make sure that, before the profile summary,
     the IR of all hot paths looks exactly the same as in the profiled
     binary.  As a result, in the einliner, we will disregard the size
     limit and inline those callsites that are:
       * inlined in the profiled binary, and
       * the cloned callee has enough samples to be considered "hot".  */
  else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      /* First take care of very large functions.  */
      int min_growth = estimate_min_edge_growth (e), growth = 0;
      int n;
      int early_inlining_insns = param_early_inlining_insns;

      if (min_growth > early_inlining_insns)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "call is cold and code would grow "
			     "at least by %i\n",
			     e->caller, callee,
			     min_growth);
	  want_inline = false;
	}
      else
	growth = estimate_edge_growth (e);

      if (!want_inline || growth <= param_max_inline_insns_size)
	;
      else if (!e->maybe_hot_p ())
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "call is cold and code would grow by %i\n",
			     e->caller, callee,
			     growth);
	  want_inline = false;
	}
      else if (growth > early_inlining_insns)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "growth %i exceeds --param early-inlining-insns\n",
			     e->caller, callee, growth);
	  want_inline = false;
	}
      else if ((n = num_calls (callee)) != 0
	       && growth * (n + 1) > early_inlining_insns)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
			     "  will not early inline: %C->%C, "
			     "growth %i exceeds --param early-inlining-insns "
			     "divided by number of calls\n",
			     e->caller, callee, growth);
	  want_inline = false;
	}
    }
  return want_inline;
}

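/* Worked example (hypothetical numbers): with
   --param early-inlining-insns=6, an edge whose estimated growth is 3
   into a callee that itself makes two non-builtin calls (n = 2) is
   rejected, since 3 * (2 + 1) = 9 exceeds the budget; a leaf callee
   (n = 0) with the same growth is accepted.  */
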
/* Compute the time of the edge->caller + edge->callee execution when
   inlining does not happen.  */

inline sreal
compute_uninlined_call_time (struct cgraph_edge *edge,
			     sreal uninlined_call_time,
			     sreal freq)
{
  cgraph_node *caller = (edge->caller->inlined_to
			 ? edge->caller->inlined_to
			 : edge->caller);

  if (freq > 0)
    uninlined_call_time *= freq;
  else
    uninlined_call_time = uninlined_call_time >> 11;

  sreal caller_time = ipa_fn_summaries->get (caller)->time;
  return uninlined_call_time + caller_time;
}

/* Same as compute_uninlined_call_time but compute the time when inlining
   does happen.  */

inline sreal
compute_inlined_call_time (struct cgraph_edge *edge,
			   sreal time,
			   sreal freq)
{
  cgraph_node *caller = (edge->caller->inlined_to
			 ? edge->caller->inlined_to
			 : edge->caller);
  sreal caller_time = ipa_fn_summaries->get (caller)->time;

  if (freq > 0)
    time *= freq;
  else
    time = time >> 11;

  /* This calculation should match the one in ipa-inline-analysis.c
     (estimate_edge_size_and_time).  */
  time -= (sreal)ipa_call_summaries->get (edge)->call_stmt_time * freq;
  time += caller_time;
  if (time <= 0)
    time = ((sreal) 1) >> 8;
  gcc_checking_assert (time >= 0);
  return time;
}

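/* Illustrative arithmetic (hypothetical values): for a call executed
   freq = 2 times per caller invocation, estimated callee time 10,
   caller body time 50 and call_stmt_time 1, the uninlined estimate is
   10 * 2 + 50 = 70 while the inlined one is 10 * 2 - 1 * 2 + 50 = 68.
   The gap between the two is what big_speedup_p and edge_badness
   build on.  */
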
/* Determine the time saved by inlining EDGE of frequency FREQ,
   where the callee's runtime w/o inlining is UNINLINED_TIME
   and with inlining is INLINED_TIME.  */

inline sreal
inlining_speedup (struct cgraph_edge *edge,
		  sreal freq,
		  sreal uninlined_time,
		  sreal inlined_time)
{
  sreal speedup = uninlined_time - inlined_time;
  /* Handling of call_time should match the one in ipa-inline-fnsummary.c
     (estimate_edge_size_and_time).  */
  sreal call_time = ipa_call_summaries->get (edge)->call_stmt_time;

  if (freq > 0)
    {
      speedup = (speedup + call_time);
      if (freq != 1)
	speedup = speedup * freq;
    }
  else if (freq == 0)
    speedup = speedup >> 11;
  gcc_checking_assert (speedup >= 0);
  return speedup;
}

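/* Sketch (hypothetical values): with uninlined_time 12, inlined_time 9
   and call_stmt_time 2, each invocation saves (12 - 9) + 2 = 5 time
   units; at freq = 3 the reported speedup is 15.  */
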
/* Return true if the speedup for inlining E is bigger than
   param_inline_min_speedup.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  sreal unspec_time;
  sreal spec_time = estimate_edge_time (e, &unspec_time);
  sreal freq = e->sreal_frequency ();
  sreal time = compute_uninlined_call_time (e, unspec_time, freq);
  sreal inlined_time = compute_inlined_call_time (e, spec_time, freq);
  cgraph_node *caller = (e->caller->inlined_to
			 ? e->caller->inlined_to
			 : e->caller);
  int limit = opt_for_fn (caller->decl, param_inline_min_speedup);

  if ((time - inlined_time) * 100 > time * limit)
    return true;
  return false;
}

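/* Reading the test (hypothetical numbers): with
   param_inline_min_speedup = 25, an uninlined time of 70 and an
   inlined time of 68, the saving is 2 and 2 * 100 = 200 does not
   exceed 70 * 25 = 1750, so the speedup is not "big"; the inlined
   time would have to drop below 52.5 to qualify.  */
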
/* Return true if we are interested in inlining the small function.
   When REPORT is true, report the reason to the dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  cgraph_node *to = (e->caller->inlined_to
		     ? e->caller->inlined_to : e->caller);

  /* Allow this function to be called before can_inline_edge_p,
     since it's usually cheaper.  */
  if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    want_inline = false;
  else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  /* Do a fast and conservative check whether the function can be a good
     inline candidate.  */
  else if ((!DECL_DECLARED_INLINE_P (callee->decl)
	    && (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
	   && ipa_fn_summaries->get (callee)->min_size
	      - ipa_call_summaries->get (e)->call_stmt_size
	      > inline_insns_auto (e->caller, true, true))
    {
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
      want_inline = false;
    }
  else if ((DECL_DECLARED_INLINE_P (callee->decl)
	    || e->count.ipa ().nonzero_p ())
	   && ipa_fn_summaries->get (callee)->min_size
	      - ipa_call_summaries->get (e)->call_stmt_size
	      > inline_insns_single (e->caller, true, true))
    {
      e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
			  ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
			  : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      ipa_hints hints = estimate_edge_hints (e);
      /* We have two independent groups of hints.  If one matches in each
	 of the groups the limits are increased.  If both groups match,
	 the limit is increased even more.  */
      bool apply_hints = (hints & (INLINE_HINT_indirect_call
				   | INLINE_HINT_known_hot
				   | INLINE_HINT_loop_iterations
				   | INLINE_HINT_loop_stride));
      bool apply_hints2 = (hints & INLINE_HINT_builtin_constant_p);

      if (growth <= opt_for_fn (to->decl,
				param_max_inline_insns_size))
	;
      /* Apply the param_max_inline_insns_single limit.  Do not do so when
	 hints suggest that inlining the given function is very profitable.
	 Avoid computation of big_speedup_p when not necessary to change
	 the outcome of the decision.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
	       && growth >= inline_insns_single (e->caller, apply_hints,
						 apply_hints2)
	       && (apply_hints || apply_hints2
		   || growth >= inline_insns_single (e->caller, true,
						     apply_hints2)
		   || !big_speedup_p (e)))
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  want_inline = false;
	}
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && !opt_for_fn (e->caller->decl, flag_inline_functions)
	       && growth >= opt_for_fn (to->decl,
					param_max_inline_insns_small))
	{
	  /* growth_positive_p is expensive, always test it last.  */
	  if (growth >= inline_insns_single (e->caller, false, false)
	      || growth_positive_p (callee, e, growth))
	    {
	      e->inline_failed = CIF_NOT_DECLARED_INLINED;
	      want_inline = false;
	    }
	}
      /* Apply the param_max_inline_insns_auto limit for functions not
	 declared inline.  Bypass the limit when the speedup seems big.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && growth >= inline_insns_auto (e->caller, apply_hints,
					       apply_hints2)
	       && (apply_hints || apply_hints2
		   || growth >= inline_insns_auto (e->caller, true,
						   apply_hints2)
		   || !big_speedup_p (e)))
	{
	  /* growth_positive_p is expensive, always test it last.  */
	  if (growth >= inline_insns_single (e->caller, false, false)
	      || growth_positive_p (callee, e, growth))
	    {
	      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	      want_inline = false;
	    }
	}
      /* If the call is cold, do not inline when the function body would
	 grow.  */
      else if (!e->maybe_hot_p ()
	       && (growth >= inline_insns_single (e->caller, false, false)
		   || growth_positive_p (callee, e, growth)))
	{
	  e->inline_failed = CIF_UNLIKELY_CALL;
	  want_inline = false;
	}
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}

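/* Sketch of the hint interaction (informal): a callee with a loop whose
   iteration count becomes known after inlining carries a first-group
   hint such as INLINE_HINT_loop_iterations, while one whose body tests
   __builtin_constant_p on an argument carries the second-group
   INLINE_HINT_builtin_constant_p.  Either group alone scales the size
   limits via the HINT arguments of inline_insns_single/auto; both
   together scale them further, so a moderately large callee may still
   pass.  */
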
/* EDGE is a self recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just cause new recursive calls to appear.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  sreal caller_freq = 1;
  int max_depth = opt_for_fn (outer_node->decl,
			      param_max_inline_recursive_depth_auto);

  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = opt_for_fn (outer_node->decl,
			    param_max_inline_recursive_depth);

  if (!edge->maybe_hot_p ())
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }
  else if (outer_node->inlined_to
	   && (caller_freq = outer_node->callers->sreal_frequency ()) == 0)
    {
      reason = "caller frequency is 0";
      want_inline = false;
    }

  if (!want_inline)
    ;
  /* Inlining of a self recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      sreal max_prob = (sreal)1 - ((sreal)1 / (sreal)max_depth);
      int i;
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob;
      if (edge->sreal_frequency () >= max_prob * caller_freq)
	{
	  reason = "frequency of recursive call is too large";
	  want_inline = false;
	}
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     the recursion depth is large.  We reduce function call overhead and
     increase chances that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame
     setup, actually slowing down functions whose recursion tree is wide
     rather than deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (edge->sreal_frequency () * 100
	  <= caller_freq
	     * opt_for_fn (outer_node->decl,
			   param_min_inline_recursive_probability))
	{
	  reason = "frequency of recursive call is too small";
	  want_inline = false;
	}
    }
  if (!want_inline && dump_enabled_p ())
    dump_printf_loc (MSG_MISSED_OPTIMIZATION, edge->call_stmt,
		     "   not inlining recursively: %s\n", reason);
  return want_inline;
}

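/* Worked peeling bound (hypothetical): with max_depth = 8 the base
   threshold is 1 - 1/8 = 0.875 of the caller frequency; at depth 3 the
   loop above squares it twice, giving 0.875^4 ~= 0.59, so ever deeper
   recursive copies are accepted only when the recursion probability
   keeps shrinking.  */
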
/* Return true when NODE has an uninlinable caller;
   set HAS_HOT_CALL if it has a hot call.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_callers (struct cgraph_node *node, void *has_hot_call)
{
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once)
	  || !opt_for_fn (e->caller->decl, optimize))
	return true;
      if (!can_inline_edge_p (e, true))
	return true;
      if (e->recursive_p ())
	return true;
      if (!can_inline_edge_by_limits_p (e, true))
	return true;
      /* Inlining large functions to large loop depth is often harmful
	 because of the register pressure it implies.  */
      if ((int)ipa_call_summaries->get (e)->loop_depth
	  > param_inline_functions_called_once_loop_depth)
	return true;
      /* Do not produce gigantic functions.  */
      if (estimate_size_after_inlining (e->caller->inlined_to ?
					e->caller->inlined_to : e->caller, e)
	  > param_inline_functions_called_once_insns)
	return true;
      if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
	*(bool *)has_hot_call = true;
    }
  return false;
}

/* If NODE has a caller, return true.  */

static bool
has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  if (node->callers)
    return true;
  return false;
}

/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  bool has_hot_call = false;

  /* Aliases get inlined along with the function they alias.  */
  if (node->alias)
    return false;
  /* Already inlined?  */
  if (node->inlined_to)
    return false;
  /* Does it have callers?  */
  if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
    return false;
  /* Inlining into all callers would increase size?  */
  if (growth_positive_p (node, NULL, INT_MIN) > 0)
    return false;
  /* All inlines must be possible.  */
  if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
					 true))
    return false;
  if (!cold && !has_hot_call)
    return false;
  return true;
}

/* Return true if WHERE of SIZE is a possible candidate for wrapper heuristics
   in estimate_edge_badness.  */

static bool
wrapper_heuristics_may_apply (struct cgraph_node *where, int size)
{
  return size < (DECL_DECLARED_INLINE_P (where->decl)
		 ? inline_insns_single (where, false, false)
		 : inline_insns_auto (where, false, false));
}

/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable callers
   of the function or function body size.  */

static sreal
edge_badness (struct cgraph_edge *edge, bool dump)
{
  sreal badness;
  int growth;
  sreal edge_time, unspec_edge_time;
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  class ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
  ipa_hints hints;
  cgraph_node *caller = (edge->caller->inlined_to
			 ? edge->caller->inlined_to
			 : edge->caller);

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge, &unspec_edge_time);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  /* Check that inlined time is better, but tolerate some roundoff issues.
     FIXME: When callee profile drops to 0 we account calls more.  This
     should be fixed by never doing that.  */
  gcc_checking_assert ((edge_time * 100
			- callee_info->time * 101).to_int () <= 0
		       || callee->count.ipa ().initialized_p ());
  gcc_checking_assert (growth <= ipa_size_summaries->get (callee)->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
	       edge->caller->dump_name (),
	       edge->callee->dump_name ());
      fprintf (dump_file, "      size growth %i, time %f unspec %f ",
	       growth,
	       edge_time.to_double (),
	       unspec_edge_time.to_double ());
      ipa_dump_hints (dump_file, hints);
      if (big_speedup_p (edge))
	fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
      if (dump)
	fprintf (dump_file, "      %f: Growth %d <= 0\n", badness.to_double (),
		 growth);
    }
  /* Inlining into EXTERNAL functions is not going to change anything unless
     they are themselves inlined.  */
  else if (DECL_EXTERNAL (caller->decl))
    {
      if (dump)
	fprintf (dump_file, "      max: function is external\n");
      return sreal::max ();
    }
  /* When profile is available.  Compute badness as:

                 time_saved * caller_count
     goodness = -------------------------------------------------
	         growth_of_caller * overall_growth * combined_size

     badness = - goodness

     Again use a negative value to make calls with profile appear hotter
     than calls without.  */
  else if (opt_for_fn (caller->decl, flag_guess_branch_prob)
	   || caller->count.ipa ().nonzero_p ())
    {
      sreal numerator, denominator;
      int overall_growth;
      sreal freq = edge->sreal_frequency ();

      numerator = inlining_speedup (edge, freq, unspec_edge_time, edge_time);
      if (numerator <= 0)
	numerator = ((sreal) 1 >> 8);
      if (caller->count.ipa ().nonzero_p ())
	numerator *= caller->count.ipa ().to_gcov_type ();
      else if (caller->count.ipa ().initialized_p ())
	numerator = numerator >> 11;
      denominator = growth;

      overall_growth = callee_info->growth;

      /* Look for inliner wrappers of the form:

	 inline_caller ()
	   {
	     do_fast_job...
	     if (need_more_work)
	       noninline_callee ();
	   }
	 Without penalizing this case, we usually inline noninline_callee
	 into the inline_caller because overall_growth is small, preventing
	 further inlining of inline_caller.

	 Penalize only callgraph edges to functions with small overall
	 growth ...
	 */
      if (growth > overall_growth
	  /* ... and having only one caller which is not inlined ...  */
	  && callee_info->single_caller
	  && !edge->caller->inlined_to
	  /* ... and edges executed only conditionally ...  */
	  && freq < 1
	  /* ... consider case where callee is not inline but caller is ...  */
	  && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
	       && DECL_DECLARED_INLINE_P (caller->decl))
	      /* ... or when early optimizers decided to split and edge
		 frequency still indicates splitting is a win ...  */
	      || (callee->split_part && !caller->split_part
		  && freq * 100
		     < opt_for_fn (caller->decl,
				   param_partial_inlining_entry_probability)
		  /* ... and do not overwrite user specified hints.  */
		  && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
		      || DECL_DECLARED_INLINE_P (caller->decl)))))
	{
	  ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
	  int caller_growth = caller_info->growth;

	  /* Only apply the penalty when the caller looks like an inline
	     candidate, and it is not called once.  */
	  if (!caller_info->single_caller && overall_growth < caller_growth
	      && caller_info->inlinable
	      && wrapper_heuristics_may_apply
		 (caller, ipa_size_summaries->get (caller)->size))
	    {
	      if (dump)
		fprintf (dump_file,
			 "     Wrapper penalty. Increasing growth %i to %i\n",
			 overall_growth, caller_growth);
	      overall_growth = caller_growth;
	    }
	}
      if (overall_growth > 0)
	{
	  /* Strongly prefer functions with few callers that can be inlined
	     fully.  The square root here leads to smaller binaries on
	     average.  Watch however for extreme cases and return to a linear
	     function when growth is large.  */
	  if (overall_growth < 256)
	    overall_growth *= overall_growth;
	  else
	    overall_growth += 256 * 256 - 256;
	  denominator *= overall_growth;
	}
      denominator *= ipa_size_summaries->get (caller)->size + growth;

      badness = - numerator / denominator;

      if (dump)
	{
	  fprintf (dump_file,
		   "      %f: guessed profile. frequency %f, count %" PRId64
		   " caller count %" PRId64
		   " time saved %f"
		   " overall growth %i (current) %i (original)"
		   " %i (compensated)\n",
		   badness.to_double (),
		   freq.to_double (),
		   edge->count.ipa ().initialized_p ()
		   ? edge->count.ipa ().to_gcov_type () : -1,
		   caller->count.ipa ().initialized_p ()
		   ? caller->count.ipa ().to_gcov_type () : -1,
		   inlining_speedup (edge, freq, unspec_edge_time,
				     edge_time).to_double (),
		   estimate_growth (callee),
		   callee_info->growth, overall_growth);
	}
    }
  /* When the function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall number
     of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (ipa_call_summaries->get (edge)->loop_depth, 8);
      badness = growth;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness = badness >> nest;
      else
	badness = badness << nest;
      if (dump)
	fprintf (dump_file, "      %f: no profile. nest %i\n",
		 badness.to_double (), nest);
    }
  gcc_checking_assert (badness != 0);

  if (edge->recursive_p ())
    badness = badness.shift (badness > 0 ? 4 : -4);
  if ((hints & (INLINE_HINT_indirect_call
		| INLINE_HINT_loop_iterations
		| INLINE_HINT_loop_stride))
      || callee_info->growth <= 0)
    badness = badness.shift (badness > 0 ? -2 : 2);
  if (hints & INLINE_HINT_builtin_constant_p)
    badness = badness.shift (badness > 0 ? -4 : 4);
  if (hints & (INLINE_HINT_same_scc))
    badness = badness.shift (badness > 0 ? 3 : -3);
  else if (hints & (INLINE_HINT_in_scc))
    badness = badness.shift (badness > 0 ? 2 : -2);
  else if (hints & (INLINE_HINT_cross_module))
    badness = badness.shift (badness > 0 ? 1 : -1);
  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    badness = badness.shift (badness > 0 ? -4 : 4);
  else if ((hints & INLINE_HINT_declared_inline))
    badness = badness.shift (badness > 0 ? -3 : 3);
  if (dump)
    fprintf (dump_file, "      Adjusted by hints %f\n", badness.to_double ());
  return badness;
}

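/* Worked instance of the profile-based formula (hypothetical numbers):
   time_saved = 5 scaled by a caller count of 1000 gives a numerator of
   5000; with edge growth 30, overall_growth 40 (squared to 1600 since
   it is below 256) and a combined caller size of 130, the denominator
   is 30 * 1600 * 130 = 6240000, so badness ~= -0.0008.  A bigger
   saving or smaller growth makes the badness more negative, and the
   edge is extracted from the heap sooner.  */
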
/* Recompute badness of EDGE and update its key in HEAP if needed.  */

static inline void
update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
{
  sreal badness = edge_badness (edge, false);
  if (edge->aux)
    {
      edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
      gcc_checking_assert (n->get_data () == edge);

      /* fibonacci_heap::replace_key does busy updating of the
	 heap that is unnecessarily expensive.
	 We do lazy increases: after extracting minimum if the key
	 turns out to be out of date, it is re-inserted into heap
	 with correct value.  */
      if (badness < n->get_key ())
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "  decreasing badness %s -> %s, %f to %f\n",
		       edge->caller->dump_name (),
		       edge->callee->dump_name (),
		       n->get_key ().to_double (),
		       badness.to_double ());
	    }
	  heap->decrease_key (n, badness);
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "  enqueuing call %s -> %s, badness %f\n",
		   edge->caller->dump_name (),
		   edge->callee->dump_name (),
		   badness.to_double ());
	}
      edge->aux = heap->insert (badness, edge);
    }
}

/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need resetting
   because a better context may be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  struct ipa_ref *ref;

  if (where->inlined_to)
    where = where->inlined_to;

  reset_node_cache (where);

  if (edge_growth_cache != NULL)
    for (edge = where->callers; edge; edge = edge->next_caller)
      if (edge->inline_failed)
	edge_growth_cache->remove (edge);

  FOR_EACH_ALIAS (where, ref)
    reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (edge_growth_cache != NULL && e->inline_failed)
	  edge_growth_cache->remove (e);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}

/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES track nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
   it is inlinable.  Otherwise check all edges.  */

static void
update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
		    bitmap updated_nodes,
		    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  struct ipa_ref *ref;

  if ((!node->alias && !ipa_fn_summaries->get (node)->inlinable)
      || node->inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->get_uid ()))
    return;

  FOR_EACH_ALIAS (node, ref)
    {
      struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
      update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	if (!check_inlinablity_for
	    || check_inlinablity_for == edge)
	  {
	    if (can_inline_edge_p (edge, false)
		&& want_inline_small_function_p (edge, false)
		&& can_inline_edge_by_limits_p (edge, false))
	      update_edge_key (heap, edge);
	    else if (edge->aux)
	      {
		report_inline_failed_reason (edge);
		heap->delete_node ((edge_heap_node_t *) edge->aux);
		edge->aux = NULL;
	      }
	  }
	else if (edge->aux)
	  update_edge_key (heap, edge);
      }
}

/* Recompute HEAP nodes for each uninlined call in NODE.
   If UPDATE_SINCE is non-NULL check if edges called within that function
   are inlinable (typically UPDATE_SINCE is the inline clone we introduced
   where all edges have new context).

   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */

static void
update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
		    struct cgraph_node *update_since,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;
  bool check_inlinability = update_since == node;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      {
	if (e->callee == update_since)
	  check_inlinability = true;
	e = e->callee->callees;
      }
    else
      {
	enum availability avail;
	struct cgraph_node *callee;
	if (!check_inlinability)
	  {
	    if (e->aux
		&& !bitmap_bit_p (updated_nodes,
				  e->callee->ultimate_alias_target
				    (&avail, e->caller)->get_uid ()))
	      update_edge_key (heap, e);
	  }
	/* We do not reset the callee growth cache here.  Since we added a
	   new call, growth should have just increased and consequently the
	   badness metric doesn't need updating.  */
	else if (e->inline_failed
		 && (callee = e->callee->ultimate_alias_target (&avail,
								e->caller))
		 && avail >= AVAIL_AVAILABLE
		 && ipa_fn_summaries->get (callee) != NULL
		 && ipa_fn_summaries->get (callee)->inlinable
		 && !bitmap_bit_p (updated_nodes, callee->get_uid ()))
	  {
	    if (can_inline_edge_p (e, false)
		&& want_inline_small_function_p (e, false)
		&& can_inline_edge_by_limits_p (e, false))
	      {
		gcc_checking_assert (check_inlinability
				     || can_inline_edge_p (e, false));
		gcc_checking_assert (check_inlinability || e->aux);
		update_edge_key (heap, e);
	      }
	    else if (e->aux)
	      {
		report_inline_failed_reason (e);
		heap->delete_node ((edge_heap_node_t *) e->aux);
		e->aux = NULL;
	      }
	  }
	/* In case we redirected to an unreachable node we only need to
	   remove the fibheap entry.  */
	else if (e->aux)
	  {
	    heap->delete_node ((edge_heap_node_t *) e->aux);
	    e->aux = NULL;
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		if (e->caller == update_since)
		  check_inlinability = false;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}

/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			edge_heap_t *heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
	|| (e->callee->ultimate_alias_target (&avail, e->caller) == node
	    && avail > AVAIL_INTERPOSABLE))
      heap->insert (-e->sreal_frequency (), e);
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

1639 /* Decide on recursive inlining: in the case function has recursive calls,
1640 inline until body size reaches given argument. If any new indirect edges
1641 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1642 is NULL. */
1644 static bool
1645 recursive_inlining (struct cgraph_edge *edge,
1646 vec<cgraph_edge *> *new_edges)
1648 cgraph_node *to = (edge->caller->inlined_to
1649 ? edge->caller->inlined_to : edge->caller);
1650 int limit = opt_for_fn (to->decl,
1651 param_max_inline_insns_recursive_auto);
1652 edge_heap_t heap (sreal::min ());
1653 struct cgraph_node *node;
1654 struct cgraph_edge *e;
1655 struct cgraph_node *master_clone = NULL, *next;
1656 int depth = 0;
1657 int n = 0;
1659 node = edge->caller;
1660 if (node->inlined_to)
1661 node = node->inlined_to;
1663 if (DECL_DECLARED_INLINE_P (node->decl))
1664 limit = opt_for_fn (to->decl, param_max_inline_insns_recursive);
1666 /* Make sure that function is small enough to be considered for inlining. */
1667 if (estimate_size_after_inlining (node, edge) >= limit)
1668 return false;
1669 lookup_recursive_calls (node, node, &heap);
1670 if (heap.empty ())
1671 return false;
1673 if (dump_file)
1674 fprintf (dump_file,
1675 " Performing recursive inlining on %s\n", node->dump_name ());
1677 /* Do the inlining and update list of recursive call during process. */
1678 while (!heap.empty ())
1680 struct cgraph_edge *curr = heap.extract_min ();
1681 struct cgraph_node *cnode, *dest = curr->callee;
1683 if (!can_inline_edge_p (curr, true)
1684 || !can_inline_edge_by_limits_p (curr, true))
1685 continue;
1687 /* MASTER_CLONE is produced in the case we already started modified
1688 the function. Be sure to redirect edge to the original body before
1689 estimating growths otherwise we will be seeing growths after inlining
1690 the already modified body. */
1691 if (master_clone)
1693 curr->redirect_callee (master_clone);
1694 if (edge_growth_cache != NULL)
1695 edge_growth_cache->remove (curr);
1698 if (estimate_size_after_inlining (node, curr) > limit)
1700 curr->redirect_callee (dest);
1701 if (edge_growth_cache != NULL)
1702 edge_growth_cache->remove (curr);
1703 break;
1706 depth = 1;
1707 for (cnode = curr->caller;
1708 cnode->inlined_to; cnode = cnode->callers->caller)
1709 if (node->decl
1710 == curr->callee->ultimate_alias_target ()->decl)
1711 depth++;
1713 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1715 curr->redirect_callee (dest);
1716 if (edge_growth_cache != NULL)
1717 edge_growth_cache->remove (curr);
1718 continue;
1721 if (dump_file)
1723 fprintf (dump_file,
1724 " Inlining call of depth %i", depth);
1725 if (node->count.nonzero_p () && curr->count.initialized_p ())
1727 fprintf (dump_file, " called approx. %.2f times per call",
1728 (double)curr->count.to_gcov_type ()
1729 / node->count.to_gcov_type ());
1731 fprintf (dump_file, "\n");
1733 if (!master_clone)
1735 /* We need the original clone to copy around. */
1736 master_clone = node->create_clone (node->decl, node->count,
1737 false, vNULL, true, NULL, NULL);
1738 for (e = master_clone->callees; e; e = e->next_callee)
1739 if (!e->inline_failed)
1740 clone_inlined_nodes (e, true, false, NULL);
1741 curr->redirect_callee (master_clone);
1742 if (edge_growth_cache != NULL)
1743 edge_growth_cache->remove (curr);
1746 inline_call (curr, false, new_edges, &overall_size, true);
1747 reset_node_cache (node);
1748 lookup_recursive_calls (node, curr->callee, &heap);
1749 n++;
1752 if (!heap.empty () && dump_file)
1753 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1755 if (!master_clone)
1756 return false;
1758 if (dump_enabled_p ())
1759 dump_printf_loc (MSG_NOTE, edge->call_stmt,
1760 "\n Inlined %i times, "
1761 "body grown from size %i to %i, time %f to %f\n", n,
1762 ipa_size_summaries->get (master_clone)->size,
1763 ipa_size_summaries->get (node)->size,
1764 ipa_fn_summaries->get (master_clone)->time.to_double (),
1765 ipa_fn_summaries->get (node)->time.to_double ());
1767 /* Remove the master clone we used for inlining. We rely on the fact that
1768 clones inlined into the master clone get queued just before the master
1769 clone, so we don't need recursion. */
1770 for (node = symtab->first_function (); node != master_clone;
1771 node = next)
1773 next = symtab->next_function (node);
1774 if (node->inlined_to == master_clone)
1775 node->remove ();
1777 master_clone->remove ();
1778 return true;
1782 /* Given the whole-compilation-unit size estimate INSNS, compute how large
1783 we can allow the unit to grow. */
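/* For example, with INSNS == 10000, param_large_unit_insns == 10000 and
   param_inline_unit_growth == 40 (illustrative parameter values), the
   unit may grow up to 10000 * (100 + 40) / 100 == 14000 instructions. */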
1785 static int64_t
1786 compute_max_insns (cgraph_node *node, int insns)
1788 int max_insns = insns;
1789 if (max_insns < opt_for_fn (node->decl, param_large_unit_insns))
1790 max_insns = opt_for_fn (node->decl, param_large_unit_insns);
1792 return ((int64_t) max_insns
1793 * (100 + opt_for_fn (node->decl, param_inline_unit_growth)) / 100);
1797 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1799 static void
1800 add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> &new_edges)
1802 while (new_edges.length () > 0)
1804 struct cgraph_edge *edge = new_edges.pop ();
1806 gcc_assert (!edge->aux);
1807 gcc_assert (edge->callee);
1808 if (edge->inline_failed
1809 && can_inline_edge_p (edge, true)
1810 && want_inline_small_function_p (edge, true)
1811 && can_inline_edge_by_limits_p (edge, true))
1812 edge->aux = heap->insert (edge_badness (edge, false), edge);
1816 /* Remove edge E from the fibheap. */
1818 static void
1819 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1821 if (e->aux)
1823 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
1824 e->aux = NULL;
1828 /* Return true if speculation of edge E seems useful.
1829 If ANTICIPATE_INLINING is true, be conservative and hope that E
1830 may get inlined. */
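/* For background (an illustrative sketch, not code from this pass):
   a speculative edge represents a transformation of an indirect call

     ptr ();

   into

     if (ptr == likely_target) likely_target (); else ptr ();

   which pays off when LIKELY_TARGET can subsequently be inlined or is
   known to be const/pure, as tested below. */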
1832 bool
1833 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1835 /* If we have already decided to inline the edge, it seems useful. */
1836 if (!e->inline_failed)
1837 return true;
1839 enum availability avail;
1840 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
1841 e->caller);
1843 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1845 if (!e->maybe_hot_p ())
1846 return false;
1848 /* See if IP optimizations found something potentially useful about the
1849 function. For now we look only for CONST/PURE flags. Almost everything
1850 else we propagate is useless. */
1851 if (avail >= AVAIL_AVAILABLE)
1853 int ecf_flags = flags_from_decl_or_type (target->decl);
1854 if (ecf_flags & ECF_CONST)
1856 if (!(e->speculative_call_indirect_edge ()->indirect_info
1857 ->ecf_flags & ECF_CONST))
1858 return true;
1860 else if (ecf_flags & ECF_PURE)
1862 if (!(e->speculative_call_indirect_edge ()->indirect_info
1863 ->ecf_flags & ECF_PURE))
1864 return true;
1867 /* If we did not manage to inline the function nor redirect
1868 to an ipa-cp clone (which is recognized by the local flag being set),
1869 it is probably pointless to inline it unless the hardware is missing an
1870 indirect call predictor. */
1871 if (!anticipate_inlining && !target->local)
1872 return false;
1873 /* For overwritable targets there is not much to do. */
1874 if (!can_inline_edge_p (e, false)
1875 || !can_inline_edge_by_limits_p (e, false, true))
1876 return false;
1877 /* OK, speculation seems interesting. */
1878 return true;
1881 /* We know that EDGE is not going to be inlined.
1882 See if we can remove speculation. */
1884 static void
1885 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1887 if (edge->speculative && !speculation_useful_p (edge, false))
1889 struct cgraph_node *node = edge->caller;
1890 struct cgraph_node *where = node->inlined_to
1891 ? node->inlined_to : node;
1892 auto_bitmap updated_nodes;
1894 if (edge->count.ipa ().initialized_p ())
1895 spec_rem += edge->count.ipa ();
1896 cgraph_edge::resolve_speculation (edge);
1897 reset_edge_caches (where);
1898 ipa_update_overall_fn_summary (where);
1899 update_caller_keys (edge_heap, where,
1900 updated_nodes, NULL);
1901 update_callee_keys (edge_heap, where, NULL,
1902 updated_nodes);
1906 /* Return true if NODE should be accounted for in the overall size estimate.
1907 Skip all nodes optimized for size so we can measure the growth of the hot
1908 part of the program regardless of the padding. */
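/* E.g. functions compiled with -Os or known to be unlikely executed do
   not count towards overall_size below (an illustrative reading of the
   predicate). */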
1910 bool
1911 inline_account_function_p (struct cgraph_node *node)
1913 return (!DECL_EXTERNAL (node->decl)
1914 && !opt_for_fn (node->decl, optimize_size)
1915 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
1918 /* Count the number of callers of NODE and store it into DATA (which
1919 points to an int). Worker for cgraph_for_node_and_aliases. */
1921 static bool
1922 sum_callers (struct cgraph_node *node, void *data)
1924 struct cgraph_edge *e;
1925 int *num_calls = (int *)data;
1927 for (e = node->callers; e; e = e->next_caller)
1928 (*num_calls)++;
1929 return false;
1932 /* We only propagate across edges with a non-interposable callee. */
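/* E.g. a non-static function defined in a shared library may be
   interposed by a different definition at link or load time, so
   nothing derived from its body can be trusted across such an edge
   (an illustrative case of AVAIL_INTERPOSABLE). */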
1934 inline bool
1935 ignore_edge_p (struct cgraph_edge *e)
1937 enum availability avail;
1938 e->callee->function_or_virtual_thunk_symbol (&avail, e->caller);
1939 return (avail <= AVAIL_INTERPOSABLE);
1942 /* We use a greedy algorithm for inlining small functions:
1943 all inline candidates are put into a prioritized heap ordered by
1944 increasing badness.
1946 The inlining of small functions is bounded by the unit growth parameters. */
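/* In outline, the main loop below behaves like this simplified sketch
   (pseudocode only, not the exact control flow):

     while (!edge_heap.empty ())
       {
         edge = edge_heap.extract_min ();   -- smallest badness first
         if (the cached badness key is out of date)
           re-insert EDGE with its current badness;
         else if (EDGE passes the inlinability, limit and profitability
                  checks)
           inline EDGE and update the keys of the affected edges;
       }  */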
1948 static void
1949 inline_small_functions (void)
1951 struct cgraph_node *node;
1952 struct cgraph_edge *edge;
1953 edge_heap_t edge_heap (sreal::min ());
1954 auto_bitmap updated_nodes;
1955 int min_size;
1956 auto_vec<cgraph_edge *> new_indirect_edges;
1957 int initial_size = 0;
1958 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1959 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1960 new_indirect_edges.create (8);
1962 edge_removal_hook_holder
1963 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1965 /* Compute overall unit size and other global parameters used by badness
1966 metrics. */
1968 max_count = profile_count::uninitialized ();
1969 ipa_reduced_postorder (order, true, ignore_edge_p);
1970 free (order);
1972 FOR_EACH_DEFINED_FUNCTION (node)
1973 if (!node->inlined_to)
1975 if (!node->alias && node->analyzed
1976 && (node->has_gimple_body_p () || node->thunk)
1977 && opt_for_fn (node->decl, optimize))
1979 class ipa_fn_summary *info = ipa_fn_summaries->get (node);
1980 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1982 /* Do not account external functions; they will be optimized out
1983 if not inlined. Also only count the non-cold portion of the program. */
1984 if (inline_account_function_p (node))
1985 initial_size += ipa_size_summaries->get (node)->size;
1986 info->growth = estimate_growth (node);
1988 int num_calls = 0;
1989 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
1990 true);
1991 if (num_calls == 1)
1992 info->single_caller = true;
1993 if (dfs && dfs->next_cycle)
1995 struct cgraph_node *n2;
1996 int id = dfs->scc_no + 1;
1997 for (n2 = node; n2;
1998 n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
1999 if (opt_for_fn (n2->decl, optimize))
2001 ipa_fn_summary *info2 = ipa_fn_summaries->get
2002 (n2->inlined_to ? n2->inlined_to : n2);
2003 if (info2->scc_no)
2004 break;
2005 info2->scc_no = id;
2010 for (edge = node->callers; edge; edge = edge->next_caller)
2011 max_count = max_count.max (edge->count.ipa ());
2013 ipa_free_postorder_info ();
2014 initialize_growth_caches ();
2016 if (dump_file)
2017 fprintf (dump_file,
2018 "\nDeciding on inlining of small functions. Starting with size %i.\n",
2019 initial_size);
2021 overall_size = initial_size;
2022 min_size = overall_size;
2024 /* Populate the heap with all edges we might inline. */
2026 FOR_EACH_DEFINED_FUNCTION (node)
2028 bool update = false;
2029 struct cgraph_edge *next = NULL;
2030 bool has_speculative = false;
2032 if (!opt_for_fn (node->decl, optimize))
2033 continue;
2035 if (dump_file)
2036 fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());
2038 for (edge = node->callees; edge; edge = edge->next_callee)
2040 if (edge->inline_failed
2041 && !edge->aux
2042 && can_inline_edge_p (edge, true)
2043 && want_inline_small_function_p (edge, true)
2044 && can_inline_edge_by_limits_p (edge, true))
2047 gcc_assert (!edge->aux);
2048 update_edge_key (&edge_heap, edge);
2050 if (edge->speculative)
2051 has_speculative = true;
2053 if (has_speculative)
2054 for (edge = node->callees; edge; edge = next)
2056 next = edge->next_callee;
2057 if (edge->speculative
2058 && !speculation_useful_p (edge, edge->aux != NULL))
2060 cgraph_edge::resolve_speculation (edge);
2061 update = true;
2064 if (update)
2066 struct cgraph_node *where = node->inlined_to
2067 ? node->inlined_to : node;
2068 ipa_update_overall_fn_summary (where);
2069 reset_edge_caches (where);
2070 update_caller_keys (&edge_heap, where,
2071 updated_nodes, NULL);
2072 update_callee_keys (&edge_heap, where, NULL,
2073 updated_nodes);
2074 bitmap_clear (updated_nodes);
2078 gcc_assert (in_lto_p
2079 || !(max_count > 0)
2080 || (profile_info && flag_branch_probabilities));
2082 while (!edge_heap.empty ())
2084 int old_size = overall_size;
2085 struct cgraph_node *where, *callee;
2086 sreal badness = edge_heap.min_key ();
2087 sreal current_badness;
2088 int growth;
2090 edge = edge_heap.extract_min ();
2091 gcc_assert (edge->aux);
2092 edge->aux = NULL;
2093 if (!edge->inline_failed || !edge->callee->analyzed)
2094 continue;
2096 /* Be sure that caches are maintained consistently.
2097 This check is affected by scaling roundoff errors when compiling for
2098 IPA, thus we skip it in that case. */
2099 if (flag_checking && !edge->callee->count.ipa_p ()
2100 && (!max_count.initialized_p () || !max_count.nonzero_p ()))
2102 sreal cached_badness = edge_badness (edge, false);
2104 int old_size_est = estimate_edge_size (edge);
2105 sreal old_time_est = estimate_edge_time (edge);
2106 int old_hints_est = estimate_edge_hints (edge);
2108 if (edge_growth_cache != NULL)
2109 edge_growth_cache->remove (edge);
2110 reset_node_cache (edge->caller->inlined_to
2111 ? edge->caller->inlined_to
2112 : edge->caller);
2113 gcc_assert (old_size_est == estimate_edge_size (edge));
2114 gcc_assert (old_time_est == estimate_edge_time (edge));
2115 /* FIXME:
2117 gcc_assert (old_hints_est == estimate_edge_hints (edge));
2119 fails with profile feedback because some hints depend on the
2120 maybe_hot_edge_p predicate, and because the callee gets inlined into
2121 other calls, the edge may become cold.
2122 This ought to be fixed by computing relative probabilities
2123 for the given invocation, but that will be better done once the whole
2124 code is converted to sreals. Disable for now and revert to the "wrong"
2125 value so the enable/disable checking paths agree. */
2126 edge_growth_cache->get (edge)->hints = old_hints_est + 1;
2128 /* When updating the edge costs, we only decrease badness in the keys.
2129 Increases of badness are handled lazily; when we see a key with an
2130 out-of-date value on it, we re-insert it now. */
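/* E.g. (illustrative): an edge enqueued with badness -10 whose
   recomputed badness is -5 has become less attractive; if some other
   edge in the heap now has a smaller key, we re-insert this one with
   the fresh value instead of inlining it immediately. */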
2131 current_badness = edge_badness (edge, false);
2132 gcc_assert (cached_badness == current_badness);
2133 gcc_assert (current_badness >= badness);
2135 else
2136 current_badness = edge_badness (edge, false);
2137 if (current_badness != badness)
2139 if (edge_heap.min () && current_badness > edge_heap.min_key ())
2141 edge->aux = edge_heap.insert (current_badness, edge);
2142 continue;
2144 else
2145 badness = current_badness;
2148 if (!can_inline_edge_p (edge, true)
2149 || !can_inline_edge_by_limits_p (edge, true))
2151 resolve_noninline_speculation (&edge_heap, edge);
2152 continue;
2155 callee = edge->callee->ultimate_alias_target ();
2156 growth = estimate_edge_growth (edge);
2157 if (dump_file)
2159 fprintf (dump_file,
2160 "\nConsidering %s with %i size\n",
2161 callee->dump_name (),
2162 ipa_size_summaries->get (callee)->size);
2163 fprintf (dump_file,
2164 " to be inlined into %s in %s:%i\n"
2165 " Estimated badness is %f, frequency %.2f.\n",
2166 edge->caller->dump_name (),
2167 edge->call_stmt
2168 && (LOCATION_LOCUS (gimple_location ((const gimple *)
2169 edge->call_stmt))
2170 > BUILTINS_LOCATION)
2171 ? gimple_filename ((const gimple *) edge->call_stmt)
2172 : "unknown",
2173 edge->call_stmt
2174 ? gimple_lineno ((const gimple *) edge->call_stmt)
2175 : -1,
2176 badness.to_double (),
2177 edge->sreal_frequency ().to_double ());
2178 if (edge->count.ipa ().initialized_p ())
2180 fprintf (dump_file, " Called ");
2181 edge->count.ipa ().dump (dump_file);
2182 fprintf (dump_file, " times\n");
2184 if (dump_flags & TDF_DETAILS)
2185 edge_badness (edge, true);
2188 where = edge->caller;
2190 if (overall_size + growth > compute_max_insns (where, min_size)
2191 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2193 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
2194 report_inline_failed_reason (edge);
2195 resolve_noninline_speculation (&edge_heap, edge);
2196 continue;
2199 if (!want_inline_small_function_p (edge, true))
2201 resolve_noninline_speculation (&edge_heap, edge);
2202 continue;
2205 profile_count old_count = callee->count;
2207 /* Heuristics for inlining small functions work poorly for
2208 recursive calls, where we get effects similar to loop unrolling.
2209 When inlining such an edge seems profitable, leave the decision to
2210 the special-purpose recursive inliner. */
2211 if (edge->recursive_p ())
2213 if (where->inlined_to)
2214 where = where->inlined_to;
2215 if (!recursive_inlining (edge,
2216 opt_for_fn (edge->caller->decl,
2217 flag_indirect_inlining)
2218 ? &new_indirect_edges : NULL))
2220 edge->inline_failed = CIF_RECURSIVE_INLINING;
2221 resolve_noninline_speculation (&edge_heap, edge);
2222 continue;
2224 reset_edge_caches (where);
2225 /* The recursive inliner inlines all recursive calls of the function
2226 at once. Consequently, we need to update all callee keys. */
2227 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
2228 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2229 update_callee_keys (&edge_heap, where, where, updated_nodes);
2230 bitmap_clear (updated_nodes);
2232 else
2234 struct cgraph_node *outer_node = NULL;
2235 int depth = 0;
2237 /* Consider the case where the self-recursive function A is inlined
2238 into B. This is a desired optimization in some cases, since it
2239 leads to an effect similar to loop peeling, and we might completely
2240 optimize out the recursive call. However, we must be extra
2241 selective. */
2243 where = edge->caller;
2244 while (where->inlined_to)
2246 if (where->decl == callee->decl)
2247 outer_node = where, depth++;
2248 where = where->callers->caller;
2250 if (outer_node
2251 && !want_inline_self_recursive_call_p (edge, outer_node,
2252 true, depth))
2254 edge->inline_failed
2255 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
2256 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
2257 resolve_noninline_speculation (&edge_heap, edge);
2258 continue;
2260 else if (depth && dump_file)
2261 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
2263 gcc_checking_assert (!callee->inlined_to);
2265 int old_size = ipa_size_summaries->get (where)->size;
2266 sreal old_time = ipa_fn_summaries->get (where)->time;
2268 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
2269 reset_edge_caches (edge->callee);
2270 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2272 /* If the caller's size and time increased, we do not need to update
2273 all edges, because badness is not going to decrease. */
2274 if (old_size <= ipa_size_summaries->get (where)->size
2275 && old_time <= ipa_fn_summaries->get (where)->time
2276 /* Wrapper penalty may be non-monotonic in this respect.
2277 Fortunately, it only affects small functions. */
2278 && !wrapper_heuristics_may_apply (where, old_size))
2279 update_callee_keys (&edge_heap, edge->callee, edge->callee,
2280 updated_nodes);
2281 else
2282 update_callee_keys (&edge_heap, where,
2283 edge->callee,
2284 updated_nodes);
2286 where = edge->caller;
2287 if (where->inlined_to)
2288 where = where->inlined_to;
2290 /* Our profitability metric can depend on local properties
2291 such as the number of inlinable calls and the size of the function
2292 body. After inlining, these properties might change for the function
2293 we inlined into (since its body size changed) and for the functions
2294 called by the function we inlined into (since the number of their
2295 inlinable callers might change). */
2296 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
2297 /* The count of the offline copy has possibly changed; recompute it if a
2298 profile is available. */
2299 struct cgraph_node *n
2300 = cgraph_node::get (edge->callee->decl)->ultimate_alias_target ();
2301 if (n != edge->callee && n->analyzed && !(n->count == old_count)
2302 && n->count.ipa_p ())
2303 update_callee_keys (&edge_heap, n, NULL, updated_nodes);
2304 bitmap_clear (updated_nodes);
2306 if (dump_enabled_p ())
2308 ipa_fn_summary *s = ipa_fn_summaries->get (where);
2310 /* dump_printf can't handle %+i. */
2311 char buf_net_change[100];
2312 snprintf (buf_net_change, sizeof buf_net_change, "%+i",
2313 overall_size - old_size);
2315 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, edge->call_stmt,
2316 " Inlined %C into %C which now has time %f and "
2317 "size %i, net change of %s%s.\n",
2318 edge->callee, edge->caller,
2319 s->time.to_double (),
2320 ipa_size_summaries->get (edge->caller)->size,
2321 buf_net_change,
2322 cross_module_call_p (edge) ? " (cross module)":"");
2324 if (min_size > overall_size)
2326 min_size = overall_size;
2328 if (dump_file)
2329 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
2333 free_growth_caches ();
2334 if (dump_enabled_p ())
2335 dump_printf (MSG_NOTE,
2336 "Unit growth for small function inlining: %i->%i (%i%%)\n",
2337 initial_size, overall_size,
2338 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
2339 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
2342 /* Flatten NODE. Performed both during early inlining and
2343 at IPA inlining time. */
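/* As an example (hypothetical user code), declaring

     __attribute__ ((flatten)) void outer (void)
     { helper1 (); helper2 (); }

   requests that every call in outer, and recursively every call in
   whatever gets inlined into it, is inlined when possible. */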
2345 static void
2346 flatten_function (struct cgraph_node *node, bool early, bool update)
2348 struct cgraph_edge *e;
2350 /* We shouldn't be called recursively when we are being processed. */
2351 gcc_assert (node->aux == NULL);
2353 node->aux = (void *) node;
2355 for (e = node->callees; e; e = e->next_callee)
2357 struct cgraph_node *orig_callee;
2358 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2360 /* Have we hit a cycle? Then it is time to give up. */
2361 if (callee->aux)
2363 if (dump_enabled_p ())
2364 dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
2365 "Not inlining %C into %C to avoid cycle.\n",
2366 callee, e->caller);
2367 if (cgraph_inline_failed_type (e->inline_failed) != CIF_FINAL_ERROR)
2368 e->inline_failed = CIF_RECURSIVE_INLINING;
2369 continue;
2372 /* When the edge is already inlined, we just need to recurse into
2373 it in order to fully flatten the leaves. */
2374 if (!e->inline_failed)
2376 flatten_function (callee, early, false);
2377 continue;
2380 /* The flatten attribute needs to be processed during late inlining. For
2381 extra code quality, however, we do flattening during early optimization,
2382 too. */
2383 if (!early
2384 ? !can_inline_edge_p (e, true)
2385 && !can_inline_edge_by_limits_p (e, true)
2386 : !can_early_inline_edge_p (e))
2387 continue;
2389 if (e->recursive_p ())
2391 if (dump_enabled_p ())
2392 dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
2393 "Not inlining: recursive call.\n");
2394 continue;
2397 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2398 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2400 if (dump_enabled_p ())
2401 dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
2402 "Not inlining: SSA form does not match.\n");
2403 continue;
2406 /* Inline the edge and flatten the inline clone. Avoid
2407 recursing through the original node if the node was cloned. */
2408 if (dump_enabled_p ())
2409 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
2410 " Inlining %C into %C.\n",
2411 callee, e->caller);
2412 orig_callee = callee;
2413 inline_call (e, true, NULL, NULL, false);
2414 if (e->callee != orig_callee)
2415 orig_callee->aux = (void *) node;
2416 flatten_function (e->callee, early, false);
2417 if (e->callee != orig_callee)
2418 orig_callee->aux = NULL;
2421 node->aux = NULL;
2422 cgraph_node *where = node->inlined_to ? node->inlined_to : node;
2423 if (update && opt_for_fn (where->decl, optimize))
2424 ipa_update_overall_fn_summary (where);
2427 /* Inline NODE into all callers. Worker for cgraph_for_node_and_aliases.
2428 DATA points to the number of calls originally found, so we avoid
2429 infinite recursion. */
2431 static bool
2432 inline_to_all_callers_1 (struct cgraph_node *node, void *data,
2433 hash_set<cgraph_node *> *callers)
2435 int *num_calls = (int *)data;
2436 bool callee_removed = false;
2438 while (node->callers && !node->inlined_to)
2440 struct cgraph_node *caller = node->callers->caller;
2442 if (!can_inline_edge_p (node->callers, true)
2443 || !can_inline_edge_by_limits_p (node->callers, true)
2444 || node->callers->recursive_p ())
2446 if (dump_file)
2447 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2448 *num_calls = 0;
2449 return false;
2452 if (dump_file)
2454 cgraph_node *ultimate = node->ultimate_alias_target ();
2455 fprintf (dump_file,
2456 "\nInlining %s size %i.\n",
2457 ultimate->dump_name (),
2458 ipa_size_summaries->get (ultimate)->size);
2459 fprintf (dump_file,
2460 " Called once from %s %i insns.\n",
2461 node->callers->caller->dump_name (),
2462 ipa_size_summaries->get (node->callers->caller)->size);
2465 /* Remember which callers we inlined into, delaying the update of the
2466 overall summary. */
2467 callers->add (node->callers->caller);
2468 inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
2469 if (dump_file)
2470 fprintf (dump_file,
2471 " Inlined into %s which now has %i size\n",
2472 caller->dump_name (),
2473 ipa_size_summaries->get (caller)->size);
2474 if (!(*num_calls)--)
2476 if (dump_file)
2477 fprintf (dump_file, "New calls found; giving up.\n");
2478 return callee_removed;
2480 if (callee_removed)
2481 return true;
2483 return false;
2486 /* Wrapper around inline_to_all_callers_1 doing delayed overall summary
2487 update. */
2489 static bool
2490 inline_to_all_callers (struct cgraph_node *node, void *data)
2492 hash_set<cgraph_node *> callers;
2493 bool res = inline_to_all_callers_1 (node, data, &callers);
2494 /* Perform the delayed update of the overall summary of all callers
2495 processed. This avoids quadratic behavior in the cases where
2496 we have a lot of calls to the same function. */
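/* E.g. (illustrative): with N calls to one function, updating the
   caller's overall summary after each individual inline would cost
   O(N^2) work in total, while batching the updates through CALLERS
   keeps it linear. */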
2497 for (hash_set<cgraph_node *>::iterator i = callers.begin ();
2498 i != callers.end (); ++i)
2499 ipa_update_overall_fn_summary ((*i)->inlined_to ? (*i)->inlined_to : *i);
2500 return res;
2503 /* Output overall time estimate. */
2504 static void
2505 dump_overall_stats (void)
2507 sreal sum_weighted = 0, sum = 0;
2508 struct cgraph_node *node;
2510 FOR_EACH_DEFINED_FUNCTION (node)
2511 if (!node->inlined_to
2512 && !node->alias)
2514 ipa_fn_summary *s = ipa_fn_summaries->get (node);
2515 if (s != NULL)
2517 sum += s->time;
2518 if (node->count.ipa ().initialized_p ())
2519 sum_weighted += s->time * node->count.ipa ().to_gcov_type ();
2522 fprintf (dump_file, "Overall time estimate: "
2523 "%f weighted by profile: "
2524 "%f\n", sum.to_double (), sum_weighted.to_double ());
2527 /* Output some useful stats about inlining. */
2529 static void
2530 dump_inline_stats (void)
2532 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2533 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2534 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2535 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2536 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2537 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2538 int64_t reason[CIF_N_REASONS][2];
2539 sreal reason_freq[CIF_N_REASONS];
2540 int i;
2541 struct cgraph_node *node;
2543 memset (reason, 0, sizeof (reason));
2544 for (i = 0; i < CIF_N_REASONS; i++)
2545 reason_freq[i] = 0;
2546 FOR_EACH_DEFINED_FUNCTION (node)
2548 struct cgraph_edge *e;
2549 for (e = node->callees; e; e = e->next_callee)
2551 if (e->inline_failed)
2553 if (e->count.ipa ().initialized_p ())
2554 reason[(int) e->inline_failed][0] += e->count.ipa ().to_gcov_type ();
2555 reason_freq[(int) e->inline_failed] += e->sreal_frequency ();
2556 reason[(int) e->inline_failed][1] ++;
2557 if (DECL_VIRTUAL_P (e->callee->decl)
2558 && e->count.ipa ().initialized_p ())
2560 if (e->indirect_inlining_edge)
2561 noninlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
2562 else
2563 noninlined_virt_cnt += e->count.ipa ().to_gcov_type ();
2565 else if (e->count.ipa ().initialized_p ())
2567 if (e->indirect_inlining_edge)
2568 noninlined_indir_cnt += e->count.ipa ().to_gcov_type ();
2569 else
2570 noninlined_cnt += e->count.ipa ().to_gcov_type ();
2573 else if (e->count.ipa ().initialized_p ())
2575 if (e->speculative)
2577 if (DECL_VIRTUAL_P (e->callee->decl))
2578 inlined_speculative_ply += e->count.ipa ().to_gcov_type ();
2579 else
2580 inlined_speculative += e->count.ipa ().to_gcov_type ();
2582 else if (DECL_VIRTUAL_P (e->callee->decl))
2584 if (e->indirect_inlining_edge)
2585 inlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
2586 else
2587 inlined_virt_cnt += e->count.ipa ().to_gcov_type ();
2589 else
2591 if (e->indirect_inlining_edge)
2592 inlined_indir_cnt += e->count.ipa ().to_gcov_type ();
2593 else
2594 inlined_cnt += e->count.ipa ().to_gcov_type ();
2598 for (e = node->indirect_calls; e; e = e->next_callee)
2599 if (e->indirect_info->polymorphic
2600 && e->count.ipa ().initialized_p ())
2601 indirect_poly_cnt += e->count.ipa ().to_gcov_type ();
2602 else if (e->count.ipa ().initialized_p ())
2603 indirect_cnt += e->count.ipa ().to_gcov_type ();
2605 if (max_count.initialized_p ())
2607 fprintf (dump_file,
2608 "Inlined %" PRId64 " + speculative "
2609 "%" PRId64 " + speculative polymorphic "
2610 "%" PRId64 " + previously indirect "
2611 "%" PRId64 " + virtual "
2612 "%" PRId64 " + virtual and previously indirect "
2613 "%" PRId64 "\n" "Not inlined "
2614 "%" PRId64 " + previously indirect "
2615 "%" PRId64 " + virtual "
2616 "%" PRId64 " + virtual and previously indirect "
2617 "%" PRId64 " + still indirect "
2618 "%" PRId64 " + still indirect polymorphic "
2619 "%" PRId64 "\n", inlined_cnt,
2620 inlined_speculative, inlined_speculative_ply,
2621 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2622 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2623 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2624 fprintf (dump_file, "Removed speculations ");
2625 spec_rem.dump (dump_file);
2626 fprintf (dump_file, "\n");
2628 dump_overall_stats ();
2629 fprintf (dump_file, "\nWhy inlining failed?\n");
2630 for (i = 0; i < CIF_N_REASONS; i++)
2631 if (reason[i][1])
2632 fprintf (dump_file, "%-50s: %8i calls, %8f freq, %" PRId64" count\n",
2633 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2634 (int) reason[i][1], reason_freq[i].to_double (), reason[i][0]);
2637 /* Called when node is removed. */
2639 static void
2640 flatten_remove_node_hook (struct cgraph_node *node, void *data)
2642 if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) == NULL)
2643 return;
2645 hash_set<struct cgraph_node *> *removed
2646 = (hash_set<struct cgraph_node *> *) data;
2647 removed->add (node);
2650 /* Decide on the inlining. We do so in topological order to avoid the
2651 expense of updating data structures. */
2653 static unsigned int
2654 ipa_inline (void)
2656 struct cgraph_node *node;
2657 int nnodes;
2658 struct cgraph_node **order;
2659 int i, j;
2660 int cold;
2661 bool remove_functions = false;
2663 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2665 if (dump_file)
2666 ipa_dump_fn_summaries (dump_file);
2668 nnodes = ipa_reverse_postorder (order);
2669 spec_rem = profile_count::zero ();
2671 FOR_EACH_FUNCTION (node)
2673 node->aux = 0;
2675 /* Recompute the default reasons for inlining because they may have
2676 changed during merging. */
2677 if (in_lto_p)
2679 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2681 gcc_assert (e->inline_failed);
2682 initialize_inline_failed (e);
2684 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2685 initialize_inline_failed (e);
2689 if (dump_file)
2690 fprintf (dump_file, "\nFlattening functions:\n");
2692 /* First shrink the order array, so that it only contains nodes with
2693 the flatten attribute. */
2694 for (i = nnodes - 1, j = i; i >= 0; i--)
2696 node = order[i];
2697 if (node->definition
2698 /* Do not try to flatten aliases. These may happen for example when
2699 creating local aliases. */
2700 && !node->alias
2701 && lookup_attribute ("flatten",
2702 DECL_ATTRIBUTES (node->decl)) != NULL)
2703 order[j--] = order[i];
2706 /* After the above loop, order[j + 1] ... order[nnodes - 1] contain the
2707 nodes with the flatten attribute. If there is more than one such
2708 node, we need to register a node removal hook, as flatten_function
2709 could remove other nodes with the flatten attribute. See PR82801. */
2710 struct cgraph_node_hook_list *node_removal_hook_holder = NULL;
2711 hash_set<struct cgraph_node *> *flatten_removed_nodes = NULL;
2712 if (j < nnodes - 2)
2714 flatten_removed_nodes = new hash_set<struct cgraph_node *>;
2715 node_removal_hook_holder
2716 = symtab->add_cgraph_removal_hook (&flatten_remove_node_hook,
2717 flatten_removed_nodes);
2720 /* In the first pass handle functions to be flattened. Do this with
2721 a priority so none of our later choices will make this impossible. */
2722 for (i = nnodes - 1; i > j; i--)
2724 node = order[i];
2725 if (flatten_removed_nodes
2726 && flatten_removed_nodes->contains (node))
2727 continue;
2729 /* Handle nodes to be flattened.
2730 Ideally when processing callees we stop inlining at the
2731 entry of cycles, possibly cloning that entry point and
2732 trying to flatten it, turning it into a self-recursive
2733 function. */
2734 if (dump_file)
2735 fprintf (dump_file, "Flattening %s\n", node->dump_name ());
2736 flatten_function (node, false, true);
2739 if (j < nnodes - 2)
2741 symtab->remove_cgraph_removal_hook (node_removal_hook_holder);
2742 delete flatten_removed_nodes;
2744 free (order);
2746 if (dump_file)
2747 dump_overall_stats ();
2749 inline_small_functions ();
2751 gcc_assert (symtab->state == IPA_SSA);
2752 symtab->state = IPA_SSA_AFTER_INLINING;
2753 /* Do the first after-inlining removal. We want to remove all "stale" extern
2754 inline functions and virtual functions, so we really know what is called
2755 once. */
2756 symtab->remove_unreachable_nodes (dump_file);
2758 /* Inline functions with the property that, after inlining into all
2759 callers, the code size will shrink because the out-of-line copy is
2760 eliminated. We do this regardless of the callee size as long as
2761 function growth limits are met. */
2762 if (dump_file)
2763 fprintf (dump_file,
2764 "\nDeciding on functions to be inlined into all callers and "
2765 "removing useless speculations:\n");
2767 /* Inlining one function called once has a good chance of preventing the
2768 inlining of another function into the same callee. Ideally we should
2769 work in priority order, but inlining hot functions first is probably
2770 a good cut without the extra pain of maintaining the queue.
2772 ??? This is not really fitting the bill perfectly: inlining a function
2773 into a callee often leads to better optimization of the callee due to
2774 increased context for optimization.
2775 For example, if the main() function calls a function that outputs help
2776 and then a function that does the actual work, we should inline
2777 the second with priority even if both calls are cold by themselves.
2779 We probably want to implement a new predicate replacing our use of
2780 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2781 to be hot. */
2782 for (cold = 0; cold <= 1; cold ++)
2784 FOR_EACH_DEFINED_FUNCTION (node)
2786 struct cgraph_edge *edge, *next;
2787 bool update = false;
2789 if (!opt_for_fn (node->decl, optimize)
2790 || !opt_for_fn (node->decl, flag_inline_functions_called_once))
2791 continue;
2793 for (edge = node->callees; edge; edge = next)
2795 next = edge->next_callee;
2796 if (edge->speculative && !speculation_useful_p (edge, false))
2798 if (edge->count.ipa ().initialized_p ())
2799 spec_rem += edge->count.ipa ();
2800 cgraph_edge::resolve_speculation (edge);
2801 update = true;
2802 remove_functions = true;
2805 if (update)
2807 struct cgraph_node *where = node->inlined_to
2808 ? node->inlined_to : node;
2809 reset_edge_caches (where);
2810 ipa_update_overall_fn_summary (where);
2812 if (want_inline_function_to_all_callers_p (node, cold))
2814 int num_calls = 0;
2815 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2816 true);
2817 while (node->call_for_symbol_and_aliases
2818 (inline_to_all_callers, &num_calls, true))
2820 remove_functions = true;
2825 if (dump_enabled_p ())
2826 dump_printf (MSG_NOTE,
2827 "\nInlined %i calls, eliminated %i functions\n\n",
2828 ncalls_inlined, nfunctions_inlined);
2829 if (dump_file)
2830 dump_inline_stats ();
2832 if (dump_file)
2833 ipa_dump_fn_summaries (dump_file);
2834 return remove_functions ? TODO_remove_functions : 0;
2837 /* Inline always-inline function calls in NODE. */
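/* For instance (hypothetical user code), a call to

     static inline int add (int a, int b)
       __attribute__ ((always_inline));

   is inlined here even when not optimizing; if that is impossible, the
   failure is diagnosed later in expand_call_inline in tree-inline.c. */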
2839 static bool
2840 inline_always_inline_functions (struct cgraph_node *node)
2842 struct cgraph_edge *e;
2843 bool inlined = false;
2845 for (e = node->callees; e; e = e->next_callee)
2847 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2848 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2849 continue;
2851 if (e->recursive_p ())
2853 if (dump_enabled_p ())
2854 dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
2855 " Not inlining recursive call to %C.\n",
2856 e->callee);
2857 e->inline_failed = CIF_RECURSIVE_INLINING;
2858 continue;
2861 if (!can_early_inline_edge_p (e))
2863 /* Set inlined to true if the callee is marked "always_inline" but
2864 is not inlinable. This will allow flagging an error later in
2865 expand_call_inline in tree-inline.c. */
2866 if (lookup_attribute ("always_inline",
2867 DECL_ATTRIBUTES (callee->decl)) != NULL)
2868 inlined = true;
2869 continue;
2872 if (dump_enabled_p ())
2873 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
2874 " Inlining %C into %C (always_inline).\n",
2875 e->callee, e->caller);
2876 inline_call (e, true, NULL, NULL, false);
2877 inlined = true;
2879 if (inlined)
2880 ipa_update_overall_fn_summary (node);
2882 return inlined;
2885 /* Decide on early inlining of small functions called by NODE. The early
2886 inliner runs in topological order, which keeps the updates cheap. */
2888 static bool
2889 early_inline_small_functions (struct cgraph_node *node)
2891 struct cgraph_edge *e;
2892 bool inlined = false;
2894 for (e = node->callees; e; e = e->next_callee)
2896 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2898 /* We can encounter a not-yet-analyzed function during
2899 early inlining on callgraphs with strongly
2900 connected components. */
2901 ipa_fn_summary *s = ipa_fn_summaries->get (callee);
2902 if (s == NULL || !s->inlinable || !e->inline_failed)
2903 continue;
2905 /* Do not consider functions not declared inline, unless flag_inline_small_functions or flag_inline_functions is on. */
2906 if (!DECL_DECLARED_INLINE_P (callee->decl)
2907 && !opt_for_fn (node->decl, flag_inline_small_functions)
2908 && !opt_for_fn (node->decl, flag_inline_functions))
2909 continue;
2911 if (dump_enabled_p ())
2912 dump_printf_loc (MSG_NOTE, e->call_stmt,
2913 "Considering inline candidate %C.\n",
2914 callee);
2916 if (!can_early_inline_edge_p (e))
2917 continue;
2919 if (e->recursive_p ())
2921 if (dump_enabled_p ())
2922 dump_printf_loc (MSG_MISSED_OPTIMIZATION, e->call_stmt,
2923 " Not inlining: recursive call.\n");
2924 continue;
2927 if (!want_early_inline_function_p (e))
2928 continue;
2930 if (dump_enabled_p ())
2931 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
2932 " Inlining %C into %C.\n",
2933 callee, e->caller);
2934 inline_call (e, true, NULL, NULL, false);
2935 inlined = true;
2938 if (inlined)
2939 ipa_update_overall_fn_summary (node);
2941 return inlined;
2944 unsigned int
2945 early_inliner (function *fun)
2947 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2948 struct cgraph_edge *edge;
2949 unsigned int todo = 0;
2950 int iterations = 0;
2951 bool inlined = false;
2953 if (seen_error ())
2954 return 0;
2956 /* Do nothing if the data structures for the IPA inliner are already
2957 computed. This happens when some pass decides to construct a new
2958 function and cgraph_add_new_function calls lowering passes and early
2959 optimization on it. This may confuse us when the early inliner decides
2960 to inline a call to a function clone, because function clones don't
2961 have a parameter list in ipa-prop matching their signature. */
2962 if (ipa_node_params_sum)
2963 return 0;
2965 if (flag_checking)
2966 node->verify ();
2967 node->remove_all_references ();
2969 /* Even when not optimizing or not inlining, inline always-inline
2970 functions. */
2971 inlined = inline_always_inline_functions (node);
2973 if (!optimize
2974 || flag_no_inline
2975 || !flag_early_inlining
2976 /* Never inline regular functions into always-inline functions
2977 during incremental inlining. This is unfortunate, as functions
2978 calling always-inline functions will get less optimized, but at the
2979 same time inlining functions that call always-inline functions
2980 into an always-inline function might introduce
2981 cycles of edges to be always inlined in the callgraph.
2983 We might want to be smarter and just avoid this type of inlining. */
2984 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2985 && lookup_attribute ("always_inline",
2986 DECL_ATTRIBUTES (node->decl))))
2988 else if (lookup_attribute ("flatten",
2989 DECL_ATTRIBUTES (node->decl)) != NULL)
2991 /* When the function is marked to be flattened, recursively inline
2992 all calls in it. */
2993 if (dump_enabled_p ())
2994 dump_printf (MSG_OPTIMIZED_LOCATIONS,
2995 "Flattening %C\n", node);
2996 flatten_function (node, true, true);
2997 inlined = true;
2999 else
3001 /* If some always_inline functions were inlined, apply the changes.
3002 This way we will not account always-inline functions in the growth
3003 limits, and moreover we will inline calls from always-inline functions
3004 that we previously skipped because of the conditional above. */
3005 if (inlined)
3007 timevar_push (TV_INTEGRATION);
3008 todo |= optimize_inline_calls (current_function_decl);
3009 /* The optimize_inline_calls call above might have introduced new
3010 statements that don't have their inline parameters computed yet. */
3011 for (edge = node->callees; edge; edge = edge->next_callee)
3013 /* We can encounter a not-yet-analyzed function during
3014 early inlining on callgraphs with strongly
3015 connected components. */
3016 ipa_call_summary *es = ipa_call_summaries->get_create (edge);
3017 es->call_stmt_size
3018 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
3019 es->call_stmt_time
3020 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
3022 ipa_update_overall_fn_summary (node);
3023 inlined = false;
3024 timevar_pop (TV_INTEGRATION);
3026 /* We iterate incremental inlining to get trivial cases of indirect
3027 inlining. */
3028 while (iterations < opt_for_fn (node->decl,
3029 param_early_inliner_max_iterations)
3030 && early_inline_small_functions (node))
3032 timevar_push (TV_INTEGRATION);
3033 todo |= optimize_inline_calls (current_function_decl);
3035 /* Technically we ought to recompute inline parameters so the new
3036 iteration of early inlining works as expected. We, however, have
3037 values approximately right already, thus we only need to update the
3038 edge info that might be cleared out for newly discovered edges. */
3039 for (edge = node->callees; edge; edge = edge->next_callee)
3041 /* We have no summary for new bound store calls yet. */
3042 ipa_call_summary *es = ipa_call_summaries->get_create (edge);
3043 es->call_stmt_size
3044 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
3045 es->call_stmt_time
3046 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
3048 if (iterations < opt_for_fn (node->decl,
3049 param_early_inliner_max_iterations) - 1)
3050 ipa_update_overall_fn_summary (node);
3051 timevar_pop (TV_INTEGRATION);
3052 iterations++;
3053 inlined = false;
3055 if (dump_file)
3056 fprintf (dump_file, "Iterations: %i\n", iterations);
3059 if (inlined)
3061 timevar_push (TV_INTEGRATION);
3062 todo |= optimize_inline_calls (current_function_decl);
3063 timevar_pop (TV_INTEGRATION);
3066 fun->always_inline_functions_inlined = true;
3068 return todo;
3071 /* Do inlining of small functions. Doing so early helps profiling and other
3072 passes to be somewhat more effective and avoids some code duplication in
3073 the later real inlining pass for testcases with very many function calls. */
3075 namespace {
3077 const pass_data pass_data_early_inline =
3079 GIMPLE_PASS, /* type */
3080 "einline", /* name */
3081 OPTGROUP_INLINE, /* optinfo_flags */
3082 TV_EARLY_INLINING, /* tv_id */
3083 PROP_ssa, /* properties_required */
3084 0, /* properties_provided */
3085 0, /* properties_destroyed */
3086 0, /* todo_flags_start */
3087 0, /* todo_flags_finish */
3090 class pass_early_inline : public gimple_opt_pass
3092 public:
3093 pass_early_inline (gcc::context *ctxt)
3094 : gimple_opt_pass (pass_data_early_inline, ctxt)
3097 /* opt_pass methods: */
3098 virtual unsigned int execute (function *);
3100 }; // class pass_early_inline
3102 unsigned int
3103 pass_early_inline::execute (function *fun)
3105 return early_inliner (fun);
3108 } // anon namespace
3110 gimple_opt_pass *
3111 make_pass_early_inline (gcc::context *ctxt)
3113 return new pass_early_inline (ctxt);
3116 namespace {
3118 const pass_data pass_data_ipa_inline =
3120 IPA_PASS, /* type */
3121 "inline", /* name */
3122 OPTGROUP_INLINE, /* optinfo_flags */
3123 TV_IPA_INLINING, /* tv_id */
3124 0, /* properties_required */
3125 0, /* properties_provided */
3126 0, /* properties_destroyed */
3127 0, /* todo_flags_start */
3128 ( TODO_dump_symtab ), /* todo_flags_finish */
3131 class pass_ipa_inline : public ipa_opt_pass_d
3133 public:
3134 pass_ipa_inline (gcc::context *ctxt)
3135 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
3136 NULL, /* generate_summary */
3137 NULL, /* write_summary */
3138 NULL, /* read_summary */
3139 NULL, /* write_optimization_summary */
3140 NULL, /* read_optimization_summary */
3141 NULL, /* stmt_fixup */
3142 0, /* function_transform_todo_flags_start */
3143 inline_transform, /* function_transform */
3144 NULL) /* variable_transform */
3147 /* opt_pass methods: */
3148 virtual unsigned int execute (function *) { return ipa_inline (); }
3150 }; // class pass_ipa_inline
3152 } // anon namespace
3154 ipa_opt_pass_d *
3155 make_pass_ipa_inline (gcc::context *ctxt)
3157 return new pass_ipa_inline (ctxt);