gcc/ipa-inline.c (blob 49af4cec13d55dc1975cf29d7fd836a83f7aa8bd)
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Inlining decision heuristics
23 The implementation of inliner is organized as follows:
25 inlining heuristics limits
27 can_inline_edge_p allows checking that a particular inlining is allowed
28 by the limits specified by the user (allowed function growth and so
29 on).
31 Functions are inlined when it is obvious the result is profitable (such
32 as functions called once or when inlining reduces code size).
33 In addition to that we perform inlining of small functions and recursive
34 inlining.
36 inlining heuristics
38 The inliner itself is split into two passes:
40 pass_early_inlining
42 A simple local inlining pass that inlines callees into the current function.
43 This pass makes no use of whole-unit analysis and thus can only make
44 very simple decisions based on local properties.
46 The strength of the pass is that it is run in topological order
47 (reverse postorder) on the callgraph. Functions are converted into SSA
48 form just before this pass and optimized subsequently. As a result, the
49 callees of the function seen by the early inliner were already optimized
50 and the results of early inlining add a lot of optimization opportunities
51 for local optimization.
53 The pass handles the obvious inlining decisions within the compilation
54 unit - inlining auto inline functions, inlining for size and
55 flattening.
57 The main strength of the pass is the ability to eliminate abstraction
58 penalty in C++ code (via combination of inlining and early
59 optimization) and thus improve quality of analysis done by real IPA
60 optimizers.
62 Because of the lack of whole-unit knowledge, the pass cannot really make
63 good code size/performance tradeoffs. It does, however, perform very simple
64 speculative inlining, allowing code size to grow by
65 EARLY_INLINING_INSNS when the callee is a leaf function. In this case the
66 optimizations performed later are very likely to eliminate the cost.
68 pass_ipa_inline
70 This is the real inliner able to handle inlining with whole program
71 knowledge. It performs the following steps:
73 1) Inlining of small functions. This is implemented by a greedy
74 algorithm ordering all inlinable cgraph edges by their badness and
75 inlining them in this order as long as the inline limits allow doing so.
77 This heuristic is not very good at inlining recursive calls. Recursive
78 calls can be inlined with results similar to loop unrolling. To do so,
79 a special-purpose recursive inliner is executed on the function when a
80 recursive edge is met as a viable candidate.
82 2) Unreachable functions are removed from the callgraph. Inlining leads
83 to devirtualization and other modifications of the callgraph, so functions
84 may become unreachable during the process. Also functions declared as
85 extern inline or virtual functions are removed, since after inlining
86 we no longer need the offline bodies.
88 3) Functions called once and not exported from the unit are inlined.
89 This should almost always lead to reduction of code size by eliminating
90 the need for offline copy of the function. */
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "tm.h"
96 #include "hash-set.h"
97 #include "machmode.h"
98 #include "vec.h"
99 #include "double-int.h"
100 #include "input.h"
101 #include "alias.h"
102 #include "symtab.h"
103 #include "wide-int.h"
104 #include "inchash.h"
105 #include "tree.h"
106 #include "fold-const.h"
107 #include "trans-mem.h"
108 #include "calls.h"
109 #include "tree-inline.h"
110 #include "langhooks.h"
111 #include "flags.h"
112 #include "diagnostic.h"
113 #include "gimple-pretty-print.h"
114 #include "params.h"
115 #include "intl.h"
116 #include "tree-pass.h"
117 #include "coverage.h"
118 #include "rtl.h"
119 #include "bitmap.h"
120 #include "profile.h"
121 #include "predict.h"
122 #include "hard-reg-set.h"
123 #include "input.h"
124 #include "function.h"
125 #include "basic-block.h"
126 #include "tree-ssa-alias.h"
127 #include "internal-fn.h"
128 #include "gimple-expr.h"
129 #include "is-a.h"
130 #include "gimple.h"
131 #include "gimple-ssa.h"
132 #include "hash-map.h"
133 #include "plugin-api.h"
134 #include "ipa-ref.h"
135 #include "cgraph.h"
136 #include "alloc-pool.h"
137 #include "symbol-summary.h"
138 #include "ipa-prop.h"
139 #include "except.h"
140 #include "target.h"
141 #include "ipa-inline.h"
142 #include "ipa-utils.h"
143 #include "sreal.h"
144 #include "auto-profile.h"
145 #include "builtins.h"
146 #include "fibonacci_heap.h"
147 #include "lto-streamer.h"
149 typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
150 typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
152 /* Statistics we collect about inlining algorithm. */
153 static int overall_size;
154 static gcov_type max_count;
155 static gcov_type spec_rem;
157 /* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100. */
158 static sreal cgraph_freq_base_rec, percent_rec;
160 /* Return false when inlining edge E would lead to violating
161 limits on function unit growth or stack usage growth.
163 The relative function body growth limit is present generally
164 to avoid problems with non-linear behavior of the compiler.
165 To allow inlining huge functions into a tiny wrapper, the limit
166 is always based on the bigger of the two functions considered.
168 For stack growth limits we always base the growth on the stack usage
169 of the caller. We want to prevent applications from segfaulting
170 on stack overflow when functions with huge stack frames get
171 inlined. */
173 static bool
174 caller_growth_limits (struct cgraph_edge *e)
176 struct cgraph_node *to = e->caller;
177 struct cgraph_node *what = e->callee->ultimate_alias_target ();
178 int newsize;
179 int limit = 0;
180 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
181 inline_summary *info, *what_info, *outer_info = inline_summaries->get (to);
183 /* Look for the function e->caller is inlined into. While doing
184 so, work out the largest function body on the way. As
185 described above, we want to base our function growth
186 limit on that: not on the self size of the
187 outer function and not on the self size of the inline code
188 we immediately inline into. This is the most relaxed
189 interpretation of the rule "do not grow large functions
190 too much in order to prevent compiler from exploding". */
191 while (true)
193 info = inline_summaries->get (to);
194 if (limit < info->self_size)
195 limit = info->self_size;
196 if (stack_size_limit < info->estimated_self_stack_size)
197 stack_size_limit = info->estimated_self_stack_size;
198 if (to->global.inlined_to)
199 to = to->callers->caller;
200 else
201 break;
204 what_info = inline_summaries->get (what);
206 if (limit < what_info->self_size)
207 limit = what_info->self_size;
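/* Allow the bigger of the two bodies to grow by PARAM_LARGE_FUNCTION_GROWTH percent. */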
209 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
211 /* Check the size after inlining against the function limits. But allow
212 the function to shrink if it went over the limits by forced inlining. */
213 newsize = estimate_size_after_inlining (to, e);
214 if (newsize >= info->size
215 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
216 && newsize > limit)
218 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
219 return false;
222 if (!what_info->estimated_stack_size)
223 return true;
225 /* FIXME: Stack size limit often prevents inlining in Fortran programs
226 due to large I/O data structures used by the Fortran front-end.
227 We ought to ignore this limit when we know that the edge is executed
228 on every invocation of the caller (i.e. its call statement dominates
229 exit block). We do not track this information, yet. */
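/* Allow the caller's stack frame to grow by PARAM_STACK_FRAME_GROWTH percent. */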
230 stack_size_limit += ((gcov_type)stack_size_limit
231 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
233 inlined_stack = (outer_info->stack_frame_offset
234 + outer_info->estimated_self_stack_size
235 + what_info->estimated_stack_size);
236 /* Check new stack consumption with stack consumption at the place
237 stack is used. */
238 if (inlined_stack > stack_size_limit
239 /* If the function already has large stack usage from a sibling
240 inline call, we can inline, too.
241 This bit overoptimistically assumes that we are good at stack
242 packing. */
243 && inlined_stack > info->estimated_stack_size
244 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
246 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
247 return false;
249 return true;
252 /* Dump info about why inlining has failed. */
254 static void
255 report_inline_failed_reason (struct cgraph_edge *e)
257 if (dump_file)
259 fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
260 xstrdup_for_dump (e->caller->name ()), e->caller->order,
261 xstrdup_for_dump (e->callee->name ()), e->callee->order,
262 cgraph_inline_failed_string (e->inline_failed));
263 if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
264 || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
265 && e->caller->lto_file_data
266 && e->callee->function_symbol ()->lto_file_data)
268 fprintf (dump_file, " LTO objects: %s, %s\n",
269 e->caller->lto_file_data->file_name,
270 e->callee->function_symbol ()->lto_file_data->file_name);
272 if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
273 cl_target_option_print_diff
274 (dump_file, 2, target_opts_for_fn (e->caller->decl),
275 target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
276 if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
277 cl_optimization_print_diff
278 (dump_file, 2, opts_for_fn (e->caller->decl),
279 opts_for_fn (e->callee->ultimate_alias_target ()->decl));
283 /* Decide whether sanitizer-related attributes allow inlining. */
285 static bool
286 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
288 /* Don't care if sanitizer is disabled */
289 if (!(flag_sanitize & SANITIZE_ADDRESS))
290 return true;
292 if (!caller || !callee)
293 return true;
295 return !!lookup_attribute ("no_sanitize_address",
296 DECL_ATTRIBUTES (caller)) ==
297 !!lookup_attribute ("no_sanitize_address",
298 DECL_ATTRIBUTES (callee));
301 /* Decide if we can inline the edge and possibly update
302 inline_failed reason.
303 We check whether inlining is possible at all and whether
304 caller growth limits allow doing so.
306 If REPORT is true, output the reason to the dump file.
308 If DISREGARD_LIMITS is true, ignore size limits. */
310 static bool
311 can_inline_edge_p (struct cgraph_edge *e, bool report,
312 bool disregard_limits = false, bool early = false)
314 gcc_checking_assert (e->inline_failed);
316 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
318 if (report)
319 report_inline_failed_reason (e);
320 return false;
323 bool inlinable = true;
324 enum availability avail;
325 cgraph_node *callee = e->callee->ultimate_alias_target (&avail);
326 cgraph_node *caller = e->caller->global.inlined_to
327 ? e->caller->global.inlined_to : e->caller;
328 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
329 tree callee_tree
330 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
332 if (!callee->definition)
334 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
335 inlinable = false;
337 else if (callee->calls_comdat_local)
339 e->inline_failed = CIF_USES_COMDAT_LOCAL;
340 inlinable = false;
342 else if (avail <= AVAIL_INTERPOSABLE)
344 e->inline_failed = CIF_OVERWRITABLE;
345 inlinable = false;
347 else if (e->call_stmt_cannot_inline_p)
349 if (e->inline_failed != CIF_FUNCTION_NOT_OPTIMIZED)
350 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
351 inlinable = false;
353 /* Don't inline if the functions have different EH personalities. */
354 else if (DECL_FUNCTION_PERSONALITY (caller->decl)
355 && DECL_FUNCTION_PERSONALITY (callee->decl)
356 && (DECL_FUNCTION_PERSONALITY (caller->decl)
357 != DECL_FUNCTION_PERSONALITY (callee->decl)))
359 e->inline_failed = CIF_EH_PERSONALITY;
360 inlinable = false;
362 /* TM pure functions should not be inlined into non-TM_pure
363 functions. */
364 else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
366 e->inline_failed = CIF_UNSPECIFIED;
367 inlinable = false;
369 /* Check compatibility of target optimization options. */
370 else if (!targetm.target_option.can_inline_p (caller->decl,
371 callee->decl))
373 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
374 inlinable = false;
376 else if (!inline_summaries->get (callee)->inlinable)
378 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
379 inlinable = false;
381 else if (inline_summaries->get (caller)->contains_cilk_spawn)
383 e->inline_failed = CIF_CILK_SPAWN;
384 inlinable = false;
386 /* Don't inline a function with mismatched sanitization attributes. */
387 else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
389 e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
390 inlinable = false;
392 /* Check if caller growth allows the inlining. */
393 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
394 && !disregard_limits
395 && !lookup_attribute ("flatten",
396 DECL_ATTRIBUTES (caller->decl))
397 && !caller_growth_limits (e))
398 inlinable = false;
399 /* Don't inline a function with a higher optimization level than the
400 caller. FIXME: this is really just the tip of the iceberg of handling
401 the optimization attribute. */
402 else if (caller_tree != callee_tree)
404 /* There are some options that change IL semantics, which means
405 we cannot inline in these cases for correctness reasons.
406 Not even for always_inline declared functions. */
407 /* Strictly speaking only when the callee contains signed integer
408 math where overflow is undefined. */
409 if ((opt_for_fn (caller->decl, flag_strict_overflow)
410 != opt_for_fn (callee->decl, flag_strict_overflow))
411 || (opt_for_fn (caller->decl, flag_wrapv)
412 != opt_for_fn (callee->decl, flag_wrapv))
413 || (opt_for_fn (caller->decl, flag_trapv)
414 != opt_for_fn (callee->decl, flag_trapv))
415 /* Strictly speaking only when the callee contains memory
416 accesses that are not using alias-set zero anyway. */
417 || (opt_for_fn (caller->decl, flag_strict_aliasing)
418 != opt_for_fn (callee->decl, flag_strict_aliasing))
419 /* Strictly speaking only when the callee uses FP math. */
420 || (opt_for_fn (caller->decl, flag_rounding_math)
421 != opt_for_fn (callee->decl, flag_rounding_math))
422 || (opt_for_fn (caller->decl, flag_trapping_math)
423 != opt_for_fn (callee->decl, flag_trapping_math))
424 || (opt_for_fn (caller->decl, flag_unsafe_math_optimizations)
425 != opt_for_fn (callee->decl, flag_unsafe_math_optimizations))
426 || (opt_for_fn (caller->decl, flag_finite_math_only)
427 != opt_for_fn (callee->decl, flag_finite_math_only))
428 || (opt_for_fn (caller->decl, flag_signaling_nans)
429 != opt_for_fn (callee->decl, flag_signaling_nans))
430 || (opt_for_fn (caller->decl, flag_cx_limited_range)
431 != opt_for_fn (callee->decl, flag_cx_limited_range))
432 || (opt_for_fn (caller->decl, flag_signed_zeros)
433 != opt_for_fn (callee->decl, flag_signed_zeros))
434 || (opt_for_fn (caller->decl, flag_associative_math)
435 != opt_for_fn (callee->decl, flag_associative_math))
436 || (opt_for_fn (caller->decl, flag_reciprocal_math)
437 != opt_for_fn (callee->decl, flag_reciprocal_math))
438 /* We do not want code compiled with exceptions to be brought
439 into a non-EH function unless we know that the callee does not
440 throw. This is tracked by DECL_FUNCTION_PERSONALITY. */
441 || (opt_for_fn (caller->decl, flag_non_call_exceptions)
442 != opt_for_fn (callee->decl, flag_non_call_exceptions)
443 /* TODO: We also may allow bringing !flag_non_call_exceptions
444 to flag_non_call_exceptions function, but that may need
445 extra work in tree-inline to add the extra EH edges. */
446 && (!opt_for_fn (callee->decl, flag_non_call_exceptions)
447 || DECL_FUNCTION_PERSONALITY (callee->decl)))
448 || (!opt_for_fn (caller->decl, flag_exceptions)
449 && opt_for_fn (callee->decl, flag_exceptions)
450 && DECL_FUNCTION_PERSONALITY (callee->decl))
451 /* Strictly speaking only when the callee contains function
452 calls that may end up setting errno. */
453 || (opt_for_fn (caller->decl, flag_errno_math)
454 != opt_for_fn (callee->decl, flag_errno_math))
455 /* When devirtualization is disabled for the callee, it is not safe
456 to inline it, as we have possibly mangled the type info.
457 Allow early inlining of always inlines. */
458 || (opt_for_fn (caller->decl, flag_devirtualize)
459 && !opt_for_fn (callee->decl, flag_devirtualize)
460 && (!early
461 || (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
462 || !lookup_attribute ("always_inline",
463 DECL_ATTRIBUTES (callee->decl))))))
465 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
466 inlinable = false;
468 /* gcc.dg/pr43564.c. Apply user-forced inline even at -O0. */
469 else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
470 && lookup_attribute ("always_inline",
471 DECL_ATTRIBUTES (callee->decl)))
473 /* When user added an attribute to the callee honor it. */
474 else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
475 && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
477 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
478 inlinable = false;
480 /* If the mismatch is caused by merging two LTO units with different
481 optimization flags, we want to be a bit nicer. However, never inline
482 if one of the functions is not optimized at all. */
483 else if (!opt_for_fn (callee->decl, optimize)
484 || !opt_for_fn (caller->decl, optimize))
486 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
487 inlinable = false;
489 /* If the callee is optimized for size and the caller is not, allow inlining if
490 the code shrinks or we are within the MAX_INLINE_INSNS_SINGLE limit and the callee
491 is inline (and thus likely a unified comdat). This will allow the caller
492 to run faster. */
493 else if (opt_for_fn (callee->decl, optimize_size)
494 > opt_for_fn (caller->decl, optimize_size))
496 int growth = estimate_edge_growth (e);
497 if (growth > 0
498 && (!DECL_DECLARED_INLINE_P (callee->decl)
499 && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
500 MAX_INLINE_INSNS_AUTO)))
502 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
503 inlinable = false;
506 /* If callee is more aggressively optimized for performance than caller,
507 we generally want to inline only cheap (runtime wise) functions. */
508 else if (opt_for_fn (callee->decl, optimize_size)
509 < opt_for_fn (caller->decl, optimize_size)
510 || (opt_for_fn (callee->decl, optimize)
511 >= opt_for_fn (caller->decl, optimize)))
513 if (estimate_edge_time (e)
514 >= 20 + inline_edge_summary (e)->call_stmt_time)
516 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
517 inlinable = false;
523 if (!inlinable && report)
524 report_inline_failed_reason (e);
525 return inlinable;
529 /* Return true if the edge E is inlinable during early inlining. */
531 static bool
532 can_early_inline_edge_p (struct cgraph_edge *e)
534 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
535 /* The early inliner might get called at WPA stage when an IPA pass adds a new
536 function. In this case we cannot really do any early inlining
537 because function bodies are missing. */
538 if (!gimple_has_body_p (callee->decl))
540 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
541 return false;
543 /* In the early inliner some of the callees may not be in SSA form yet
544 (i.e. the callgraph is cyclic and we did not process
545 the callee with the early inliner yet). We don't have a CIF code for this
546 case; later we will re-do the decision in the real inliner. */
547 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
548 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
550 if (dump_file)
551 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
552 return false;
554 if (!can_inline_edge_p (e, true, false, true))
555 return false;
556 return true;
560 /* Return number of calls in N. Ignore cheap builtins. */
562 static int
563 num_calls (struct cgraph_node *n)
565 struct cgraph_edge *e;
566 int num = 0;
568 for (e = n->callees; e; e = e->next_callee)
569 if (!is_inexpensive_builtin (e->callee->decl))
570 num++;
571 return num;
575 /* Return true if we are interested in inlining a small function. */
577 static bool
578 want_early_inline_function_p (struct cgraph_edge *e)
580 bool want_inline = true;
581 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
583 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
585 /* For AutoFDO, we need to make sure that before profile summary, all
586 hot paths' IR looks exactly the same as in the profiled binary. As a result,
587 in the early inliner we will disregard the size limit and inline those callsites
588 that are:
589 * inlined in the profiled binary, and
590 * the cloned callee has enough samples to be considered "hot". */
591 else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
593 else if (!DECL_DECLARED_INLINE_P (callee->decl)
594 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
596 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
597 report_inline_failed_reason (e);
598 want_inline = false;
600 else
602 int growth = estimate_edge_growth (e);
603 int n;
605 if (growth <= 0)
607 else if (!e->maybe_hot_p ()
608 && growth > 0)
610 if (dump_file)
611 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
612 "call is cold and code would grow by %i\n",
613 xstrdup_for_dump (e->caller->name ()),
614 e->caller->order,
615 xstrdup_for_dump (callee->name ()), callee->order,
616 growth);
617 want_inline = false;
619 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
621 if (dump_file)
622 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
623 "growth %i exceeds --param early-inlining-insns\n",
624 xstrdup_for_dump (e->caller->name ()),
625 e->caller->order,
626 xstrdup_for_dump (callee->name ()), callee->order,
627 growth);
628 want_inline = false;
630 else if ((n = num_calls (callee)) != 0
631 && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
633 if (dump_file)
634 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
635 "growth %i exceeds --param early-inlining-insns "
636 "divided by number of calls\n",
637 xstrdup_for_dump (e->caller->name ()),
638 e->caller->order,
639 xstrdup_for_dump (callee->name ()), callee->order,
640 growth);
641 want_inline = false;
644 return want_inline;
647 /* Compute time of the edge->caller + edge->callee execution when inlining
648 does not happen. */
650 inline sreal
651 compute_uninlined_call_time (struct inline_summary *callee_info,
652 struct cgraph_edge *edge)
654 sreal uninlined_call_time = (sreal)callee_info->time;
655 cgraph_node *caller = (edge->caller->global.inlined_to
656 ? edge->caller->global.inlined_to
657 : edge->caller);
659 if (edge->count && caller->count)
660 uninlined_call_time *= (sreal)edge->count / caller->count;
661 if (edge->frequency)
662 uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
663 else
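/* The edge has no recorded frequency; scale the estimated time down by 2^11 instead of multiplying by a zero frequency. */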
664 uninlined_call_time = uninlined_call_time >> 11;
666 int caller_time = inline_summaries->get (caller)->time;
667 return uninlined_call_time + caller_time;
670 /* Same as compute_uninlined_call_time but compute the time when inlining
671 does happen. */
673 inline sreal
674 compute_inlined_call_time (struct cgraph_edge *edge,
675 int edge_time)
677 cgraph_node *caller = (edge->caller->global.inlined_to
678 ? edge->caller->global.inlined_to
679 : edge->caller);
680 int caller_time = inline_summaries->get (caller)->time;
681 sreal time = edge_time;
683 if (edge->count && caller->count)
684 time *= (sreal)edge->count / caller->count;
685 if (edge->frequency)
686 time *= cgraph_freq_base_rec * edge->frequency;
687 else
688 time = time >> 11;
690 /* This calculation should match one in ipa-inline-analysis.
691 FIXME: Once ipa-inline-analysis is converted to sreal this can be
692 simplified. */
693 time -= (sreal) ((gcov_type) edge->frequency
694 * inline_edge_summary (edge)->call_stmt_time
695 * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE)) / INLINE_TIME_SCALE;
696 time += caller_time;
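/* Never return a non-positive time; clamp to a tiny positive value (2^-8). */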
697 if (time <= 0)
698 time = ((sreal) 1) >> 8;
699 gcc_checking_assert (time >= 0);
700 return time;
703 /* Return true if the speedup for inlining E is bigger than
704 PARAM_INLINE_MIN_SPEEDUP. */
706 static bool
707 big_speedup_p (struct cgraph_edge *e)
709 sreal time = compute_uninlined_call_time (inline_summaries->get (e->callee),
711 sreal inlined_time = compute_inlined_call_time (e, estimate_edge_time (e));
713 if (time - inlined_time
714 > (sreal) time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)
715 * percent_rec)
716 return true;
717 return false;
720 /* Return true if we are interested in inlining a small function.
721 When REPORT is true, report the reason to the dump file. */
723 static bool
724 want_inline_small_function_p (struct cgraph_edge *e, bool report)
726 bool want_inline = true;
727 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
729 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
731 else if (!DECL_DECLARED_INLINE_P (callee->decl)
732 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
734 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
735 want_inline = false;
737 /* Do a fast and conservative check whether the function can be a good
738 inline candidate. At the moment we allow inline hints to
739 promote non-inline functions to inline and we increase
740 MAX_INLINE_INSNS_SINGLE 16-fold for inline functions. */
741 else if ((!DECL_DECLARED_INLINE_P (callee->decl)
742 && (!e->count || !e->maybe_hot_p ()))
743 && inline_summaries->get (callee)->min_size
744 - inline_edge_summary (e)->call_stmt_size
745 > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
747 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
748 want_inline = false;
750 else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
751 && inline_summaries->get (callee)->min_size
752 - inline_edge_summary (e)->call_stmt_size
753 > 16 * MAX_INLINE_INSNS_SINGLE)
755 e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
756 ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
757 : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
758 want_inline = false;
760 else
762 int growth = estimate_edge_growth (e);
763 inline_hints hints = estimate_edge_hints (e);
764 bool big_speedup = big_speedup_p (e);
766 if (growth <= 0)
768 /* Apply MAX_INLINE_INSNS_SINGLE limit. Do not do so when
769 hints suggest that inlining the given function is very profitable. */
770 else if (DECL_DECLARED_INLINE_P (callee->decl)
771 && growth >= MAX_INLINE_INSNS_SINGLE
772 && ((!big_speedup
773 && !(hints & (INLINE_HINT_indirect_call
774 | INLINE_HINT_known_hot
775 | INLINE_HINT_loop_iterations
776 | INLINE_HINT_array_index
777 | INLINE_HINT_loop_stride)))
778 || growth >= MAX_INLINE_INSNS_SINGLE * 16))
780 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
781 want_inline = false;
783 else if (!DECL_DECLARED_INLINE_P (callee->decl)
784 && !opt_for_fn (e->caller->decl, flag_inline_functions))
786 /* growth_likely_positive is expensive, always test it last. */
787 if (growth >= MAX_INLINE_INSNS_SINGLE
788 || growth_likely_positive (callee, growth))
790 e->inline_failed = CIF_NOT_DECLARED_INLINED;
791 want_inline = false;
794 /* Apply the MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
795 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
796 inlining the given function is very profitable. */
797 else if (!DECL_DECLARED_INLINE_P (callee->decl)
798 && !big_speedup
799 && !(hints & INLINE_HINT_known_hot)
800 && growth >= ((hints & (INLINE_HINT_indirect_call
801 | INLINE_HINT_loop_iterations
802 | INLINE_HINT_array_index
803 | INLINE_HINT_loop_stride))
804 ? MAX (MAX_INLINE_INSNS_AUTO,
805 MAX_INLINE_INSNS_SINGLE)
806 : MAX_INLINE_INSNS_AUTO))
808 /* growth_likely_positive is expensive, always test it last. */
809 if (growth >= MAX_INLINE_INSNS_SINGLE
810 || growth_likely_positive (callee, growth))
812 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
813 want_inline = false;
816 /* If call is cold, do not inline when function body would grow. */
817 else if (!e->maybe_hot_p ()
818 && (growth >= MAX_INLINE_INSNS_SINGLE
819 || growth_likely_positive (callee, growth)))
821 e->inline_failed = CIF_UNLIKELY_CALL;
822 want_inline = false;
825 if (!want_inline && report)
826 report_inline_failed_reason (e);
827 return want_inline;
830 /* EDGE is a self-recursive edge.
831 We handle two cases - when function A is inlined into itself
832 or when function A is being inlined into another inlined copy of function
833 A within function B.
835 In the first case OUTER_NODE points to the toplevel copy of A, while
836 in the second case OUTER_NODE points to the outermost copy of A in B.
838 In both cases we want to be extra selective since
839 inlining the call will just cause new recursive calls to appear.
841 static bool
842 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
843 struct cgraph_node *outer_node,
844 bool peeling,
845 int depth)
847 char const *reason = NULL;
848 bool want_inline = true;
849 int caller_freq = CGRAPH_FREQ_BASE;
850 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
852 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
853 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
855 if (!edge->maybe_hot_p ())
857 reason = "recursive call is cold";
858 want_inline = false;
860 else if (max_count && !outer_node->count)
862 reason = "not executed in profile";
863 want_inline = false;
865 else if (depth > max_depth)
867 reason = "--param max-inline-recursive-depth exceeded.";
868 want_inline = false;
871 if (outer_node->global.inlined_to)
872 caller_freq = outer_node->callers->frequency;
874 if (!caller_freq)
876 reason = "function is inlined and unlikely";
877 want_inline = false;
880 if (!want_inline)
882 /* Inlining of a self-recursive function into a copy of itself within another
883 function is a transformation similar to loop peeling.
885 Peeling is profitable if we can inline enough copies to make the probability
886 of an actual call to the self-recursive function very small. Be sure that
887 the probability of recursion is small.
889 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
890 This way the expected number of recursions is at most max_depth (the mean of a geometric distribution with stopping probability 1/max_depth). */
891 else if (peeling)
893 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
894 / max_depth);
895 int i;
896 for (i = 1; i < depth; i++)
897 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
898 if (max_count
899 && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
900 >= max_prob))
902 reason = "profile of recursive call is too large";
903 want_inline = false;
905 if (!max_count
906 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
907 >= max_prob))
909 reason = "frequency of recursive call is too large";
910 want_inline = false;
913 /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the
914 recursion depth is large. We reduce function call overhead and increase the
915 chance that things fit in the hardware return predictor.
917 Recursive inlining might however increase the cost of stack frame setup,
918 actually slowing down functions whose recursion tree is wide rather than
919 deep.
921 Deciding reliably when to do recursive inlining without profile feedback
922 is tricky. For now we disable recursive inlining when the probability of
923 self-recursion is low.
925 Recursive inlining of a self-recursive call within a loop also results in
926 large loop depths that generally optimize badly. We may want to throttle
927 down inlining in those cases. In particular this seems to happen in one of
928 the libstdc++ rb tree methods. */
929 else
931 if (max_count
932 && (edge->count * 100 / outer_node->count
933 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
935 reason = "profile of recursive call is too small";
936 want_inline = false;
938 else if (!max_count
939 && (edge->frequency * 100 / caller_freq
940 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
942 reason = "frequency of recursive call is too small";
943 want_inline = false;
946 if (!want_inline && dump_file)
947 fprintf (dump_file, " not inlining recursively: %s\n", reason);
948 return want_inline;
951 /* Return true when NODE has an uninlinable caller;
952 set HAS_HOT_CALL if it has a hot call.
953 Worker for cgraph_for_node_and_aliases. */
955 static bool
956 check_callers (struct cgraph_node *node, void *has_hot_call)
958 struct cgraph_edge *e;
959 for (e = node->callers; e; e = e->next_caller)
961 if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
962 return true;
963 if (!can_inline_edge_p (e, true))
964 return true;
965 if (e->recursive_p ())
966 return true;
967 if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
968 *(bool *)has_hot_call = true;
970 return false;
973 /* If NODE has a caller, return true. */
975 static bool
976 has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
978 if (node->callers)
979 return true;
980 return false;
983 /* Decide if inlining NODE would reduce unit size by eliminating
984 the offline copy of function.
985 When COLD is true the cold calls are considered, too. */
987 static bool
988 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
990 bool has_hot_call = false;
992 /* Aliases get inlined along with the function they alias. */
993 if (node->alias)
994 return false;
995 /* Already inlined? */
996 if (node->global.inlined_to)
997 return false;
998 /* Does it have callers? */
999 if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
1000 return false;
1001 /* Inlining into all callers would increase size? */
1002 if (estimate_growth (node) > 0)
1003 return false;
1004 /* All inlines must be possible. */
1005 if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
1006 true))
1007 return false;
1008 if (!cold && !has_hot_call)
1009 return false;
1010 return true;
1013 /* A cost model driving the inlining heuristics in a way so the edges with
1014 smallest badness are inlined first. After each inlining is performed
1015 the costs of all caller edges of nodes affected are recomputed so the
1016 metrics may accurately depend on values such as number of inlinable callers
1017 of the function or function body size. */
1019 static sreal
1020 edge_badness (struct cgraph_edge *edge, bool dump)
1022 sreal badness;
1023 int growth, edge_time;
1024 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1025 struct inline_summary *callee_info = inline_summaries->get (callee);
1026 inline_hints hints;
1027 cgraph_node *caller = (edge->caller->global.inlined_to
1028 ? edge->caller->global.inlined_to
1029 : edge->caller);
1031 growth = estimate_edge_growth (edge);
1032 edge_time = estimate_edge_time (edge);
1033 hints = estimate_edge_hints (edge);
1034 gcc_checking_assert (edge_time >= 0);
1035 gcc_checking_assert (edge_time <= callee_info->time);
1036 gcc_checking_assert (growth <= callee_info->size);
1038 if (dump)
1040 fprintf (dump_file, " Badness calculation for %s/%i -> %s/%i\n",
1041 xstrdup_for_dump (edge->caller->name ()),
1042 edge->caller->order,
1043 xstrdup_for_dump (callee->name ()),
1044 edge->callee->order);
1045 fprintf (dump_file, " size growth %i, time %i ",
1046 growth,
1047 edge_time);
1048 dump_inline_hints (dump_file, hints);
1049 if (big_speedup_p (edge))
1050 fprintf (dump_file, " big_speedup");
1051 fprintf (dump_file, "\n");
1054 /* Always prefer inlining saving code size. */
1055 if (growth <= 0)
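/* Compose a hugely negative badness (minimal significand scaled to a large
   exponent) so size-shrinking edges always sort ahead of everything else;
   bigger savings (more negative growth) sort earlier. */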
1057 badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
1058 if (dump)
1059 fprintf (dump_file, " %f: Growth %d <= 0\n", badness.to_double (),
1060 growth);
1062 /* Inlining into EXTERNAL functions is not going to change anything unless
1063 they are themselves inlined. */
1064 else if (DECL_EXTERNAL (caller->decl))
1066 if (dump)
1067 fprintf (dump_file, " max: function is external\n");
1068 return sreal::max ();
1070 /* When a profile is available, compute badness as:
1072 goodness = (time_saved * caller_count)
1073 / (growth_of_caller * overall_growth)
1076 badness = - goodness
1078 Again, use a negative value to make calls with a profile appear hotter
1079 than calls without. */
1081 else if (opt_for_fn (caller->decl, flag_guess_branch_prob) || caller->count)
1083 sreal numerator, denominator;
1085 numerator = (compute_uninlined_call_time (callee_info, edge)
1086 - compute_inlined_call_time (edge, edge_time));
1087 if (numerator == 0)
1088 numerator = ((sreal) 1 >> 8);
1089 if (caller->count)
1090 numerator *= caller->count;
1091 else if (opt_for_fn (caller->decl, flag_branch_probabilities))
1092 numerator = numerator >> 11;
1093 denominator = growth;
1094 if (callee_info->growth > 0)
1095 denominator *= callee_info->growth;
1097 badness = - numerator / denominator;
1099 if (dump)
1101 fprintf (dump_file,
1102 " %f: guessed profile. frequency %f, count %"PRId64
1103 " caller count %"PRId64
1104 " time w/o inlining %f, time w inlining %f"
1105 " overall growth %i (current) %i (original)\n",
1106 badness.to_double (), (double)edge->frequency / CGRAPH_FREQ_BASE,
1107 edge->count, caller->count,
1108 compute_uninlined_call_time (callee_info, edge).to_double (),
1109 compute_inlined_call_time (edge, edge_time).to_double (),
1110 estimate_growth (callee),
1111 callee_info->growth);
1114 /* When the function-local profile is not available or does not give
1115 useful information (i.e. frequency is zero), base the cost on the
1116 loop nest and overall size growth, so we optimize for the overall number
1117 of functions fully inlined in the program. */
1118 else
1120 int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
1121 badness = growth;
1123 /* Decrease badness if call is nested. */
1124 if (badness > 0)
1125 badness = badness >> nest;
1126 else
1127 badness = badness << nest;
1128 if (dump)
1129 fprintf (dump_file, " %f: no profile. nest %i\n", badness.to_double (),
1130 nest);
1132 gcc_checking_assert (badness != 0);
1134 if (edge->recursive_p ())
1135 badness = badness.shift (badness > 0 ? 4 : -4);
1136 if ((hints & (INLINE_HINT_indirect_call
1137 | INLINE_HINT_loop_iterations
1138 | INLINE_HINT_array_index
1139 | INLINE_HINT_loop_stride))
1140 || callee_info->growth <= 0)
1141 badness = badness.shift (badness > 0 ? -2 : 2);
1142 if (hints & (INLINE_HINT_same_scc))
1143 badness = badness.shift (badness > 0 ? 3 : -3);
1144 else if (hints & (INLINE_HINT_in_scc))
1145 badness = badness.shift (badness > 0 ? 2 : -2);
1146 else if (hints & (INLINE_HINT_cross_module))
1147 badness = badness.shift (badness > 0 ? 1 : -1);
1148 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1149 badness = badness.shift (badness > 0 ? -4 : 4);
1150 else if ((hints & INLINE_HINT_declared_inline))
1151 badness = badness.shift (badness > 0 ? -3 : 3);
1152 if (dump)
1153 fprintf (dump_file, " Adjusted by hints %f\n", badness.to_double ());
1154 return badness;
1157 /* Recompute badness of EDGE and update its key in HEAP if needed. */
1158 static inline void
1159 update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
1161 sreal badness = edge_badness (edge, false);
1162 if (edge->aux)
1164 edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
1165 gcc_checking_assert (n->get_data () == edge);
1167 /* fibonacci_heap::replace_key does busy updating of the
1168 heap that is unnecessarily expensive.
1169 We do lazy increases: after extracting the minimum, if the key
1170 turns out to be out of date, it is re-inserted into the heap
1171 with the correct value. */
1172 if (badness < n->get_key ())
1174 if (dump_file && (dump_flags & TDF_DETAILS))
1176 fprintf (dump_file,
1177 " decreasing badness %s/%i -> %s/%i, %f"
1178 " to %f\n",
1179 xstrdup_for_dump (edge->caller->name ()),
1180 edge->caller->order,
1181 xstrdup_for_dump (edge->callee->name ()),
1182 edge->callee->order,
1183 n->get_key ().to_double (),
1184 badness.to_double ());
1186 heap->decrease_key (n, badness);
1189 else
1191 if (dump_file && (dump_flags & TDF_DETAILS))
1193 fprintf (dump_file,
1194 " enqueuing call %s/%i -> %s/%i, badness %f\n",
1195 xstrdup_for_dump (edge->caller->name ()),
1196 edge->caller->order,
1197 xstrdup_for_dump (edge->callee->name ()),
1198 edge->callee->order,
1199 badness.to_double ());
1201 edge->aux = heap->insert (badness, edge);
1206 /* NODE was inlined.
1207 All caller edges need to be reset because
1208 size estimates change. Similarly, callees need resetting
1209 because better context may be known. */
1211 static void
1212 reset_edge_caches (struct cgraph_node *node)
1214 struct cgraph_edge *edge;
1215 struct cgraph_edge *e = node->callees;
1216 struct cgraph_node *where = node;
1217 struct ipa_ref *ref;
1219 if (where->global.inlined_to)
1220 where = where->global.inlined_to;
1222 for (edge = where->callers; edge; edge = edge->next_caller)
1223 if (edge->inline_failed)
1224 reset_edge_growth_cache (edge);
1226 FOR_EACH_ALIAS (where, ref)
1227 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
1229 if (!e)
1230 return;
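/* Walk the tree of calls inlined into NODE: descend into already inlined
   callees and reset the growth cache of every edge that is still a real
   (not yet inlined) call. */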
1232 while (true)
1233 if (!e->inline_failed && e->callee->callees)
1234 e = e->callee->callees;
1235 else
1237 if (e->inline_failed)
1238 reset_edge_growth_cache (e);
1239 if (e->next_callee)
1240 e = e->next_callee;
1241 else
1245 if (e->caller == node)
1246 return;
1247 e = e->caller->callers;
1249 while (!e->next_callee);
1250 e = e->next_callee;
1255 /* Recompute HEAP nodes for each caller of NODE.
1256 UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
1257 When CHECK_INLINABLITY_FOR is set, re-check for specified edge that
1258 it is inlinable. Otherwise check all edges. */
1260 static void
1261 update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
1262 bitmap updated_nodes,
1263 struct cgraph_edge *check_inlinablity_for)
1265 struct cgraph_edge *edge;
1266 struct ipa_ref *ref;
1268 if ((!node->alias && !inline_summaries->get (node)->inlinable)
1269 || node->global.inlined_to)
1270 return;
1271 if (!bitmap_set_bit (updated_nodes, node->uid))
1272 return;
1274 FOR_EACH_ALIAS (node, ref)
1276 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1277 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1280 for (edge = node->callers; edge; edge = edge->next_caller)
1281 if (edge->inline_failed)
1283 if (!check_inlinablity_for
1284 || check_inlinablity_for == edge)
1286 if (can_inline_edge_p (edge, false)
1287 && want_inline_small_function_p (edge, false))
1288 update_edge_key (heap, edge);
1289 else if (edge->aux)
1291 report_inline_failed_reason (edge);
1292 heap->delete_node ((edge_heap_node_t *) edge->aux);
1293 edge->aux = NULL;
1296 else if (edge->aux)
1297 update_edge_key (heap, edge);
1301 /* Recompute HEAP nodes for each uninlined call in NODE.
1302 This is used when we know that edge badnesses are only going to increase
1303 (we introduced a new call site) and thus all we need is to insert newly
1304 created edges into the heap. */
1306 static void
1307 update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
1308 bitmap updated_nodes)
1310 struct cgraph_edge *e = node->callees;
1312 if (!e)
1313 return;
1314 while (true)
1315 if (!e->inline_failed && e->callee->callees)
1316 e = e->callee->callees;
1317 else
1319 enum availability avail;
1320 struct cgraph_node *callee;
1321 /* We do not reset the callee growth cache here. Since we added a new call,
1322 growth could only have increased and consequently the badness metric
1323 doesn't need updating. */
1324 if (e->inline_failed
1325 && (callee = e->callee->ultimate_alias_target (&avail))
1326 && inline_summaries->get (callee)->inlinable
1327 && avail >= AVAIL_AVAILABLE
1328 && !bitmap_bit_p (updated_nodes, callee->uid))
1330 if (can_inline_edge_p (e, false)
1331 && want_inline_small_function_p (e, false))
1332 update_edge_key (heap, e);
1333 else if (e->aux)
1335 report_inline_failed_reason (e);
1336 heap->delete_node ((edge_heap_node_t *) e->aux);
1337 e->aux = NULL;
1340 if (e->next_callee)
1341 e = e->next_callee;
1342 else
1346 if (e->caller == node)
1347 return;
1348 e = e->caller->callers;
1350 while (!e->next_callee);
1351 e = e->next_callee;
1356 /* Enqueue all recursive calls from NODE into the priority queue, ordered by
1357 how much we want to recursively inline the call. */
1359 static void
1360 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1361 edge_heap_t *heap)
1363 struct cgraph_edge *e;
1364 enum availability avail;
1366 for (e = where->callees; e; e = e->next_callee)
1367 if (e->callee == node
1368 || (e->callee->ultimate_alias_target (&avail) == node
1369 && avail > AVAIL_INTERPOSABLE))
1371 /* When profile feedback is available, prioritize by expected number
1372 of calls. */
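/* The heap pops the minimum key first, so the key is negated: more expected
   calls (higher count or frequency) gives a smaller key. Counts are scaled
   down to roughly 24 bits so the key stays in range. */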
1373 heap->insert (!max_count ? -e->frequency
1374 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
1377 for (e = where->callees; e; e = e->next_callee)
1378 if (!e->inline_failed)
1379 lookup_recursive_calls (node, e->callee, heap);
1382 /* Decide on recursive inlining: in case the function has recursive calls,
1383 inline until body size reaches given argument. If any new indirect edges
1384 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1385 is NULL. */
1387 static bool
1388 recursive_inlining (struct cgraph_edge *edge,
1389 vec<cgraph_edge *> *new_edges)
1391 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1392 edge_heap_t heap (sreal::min ());
1393 struct cgraph_node *node;
1394 struct cgraph_edge *e;
1395 struct cgraph_node *master_clone = NULL, *next;
1396 int depth = 0;
1397 int n = 0;
1399 node = edge->caller;
1400 if (node->global.inlined_to)
1401 node = node->global.inlined_to;
1403 if (DECL_DECLARED_INLINE_P (node->decl))
1404 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1406 /* Make sure that function is small enough to be considered for inlining. */
1407 if (estimate_size_after_inlining (node, edge) >= limit)
1408 return false;
1409 lookup_recursive_calls (node, node, &heap);
1410 if (heap.empty ())
1411 return false;
1413 if (dump_file)
1414 fprintf (dump_file,
1415 " Performing recursive inlining on %s\n",
1416 node->name ());
1418 /* Do the inlining and update the list of recursive calls during the process. */
1419 while (!heap.empty ())
1421 struct cgraph_edge *curr = heap.extract_min ();
1422 struct cgraph_node *cnode, *dest = curr->callee;
1424 if (!can_inline_edge_p (curr, true))
1425 continue;
1427 /* MASTER_CLONE is produced in the case we already started modifying
1428 the function. Be sure to redirect the edge to the original body before
1429 estimating growths, otherwise we will be seeing growths after inlining
1430 the already modified body. */
1431 if (master_clone)
1433 curr->redirect_callee (master_clone);
1434 reset_edge_growth_cache (curr);
1437 if (estimate_size_after_inlining (node, curr) > limit)
1439 curr->redirect_callee (dest);
1440 reset_edge_growth_cache (curr);
1441 break;
1444 depth = 1;
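/* Walk the chain of callers that were themselves produced by inlining to
   see how deep this recursive inlining already is. */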
1445 for (cnode = curr->caller;
1446 cnode->global.inlined_to; cnode = cnode->callers->caller)
1447 if (node->decl
1448 == curr->callee->ultimate_alias_target ()->decl)
1449 depth++;
1451 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1453 curr->redirect_callee (dest);
1454 reset_edge_growth_cache (curr);
1455 continue;
1458 if (dump_file)
1460 fprintf (dump_file,
1461 " Inlining call of depth %i", depth);
1462 if (node->count)
1464 fprintf (dump_file, " called approx. %.2f times per call",
1465 (double)curr->count / node->count);
1467 fprintf (dump_file, "\n");
1469 if (!master_clone)
1471 /* We need original clone to copy around. */
1472 master_clone = node->create_clone (node->decl, node->count,
1473 CGRAPH_FREQ_BASE, false, vNULL,
1474 true, NULL, NULL);
1475 for (e = master_clone->callees; e; e = e->next_callee)
1476 if (!e->inline_failed)
1477 clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
1478 curr->redirect_callee (master_clone);
1479 reset_edge_growth_cache (curr);
1482 inline_call (curr, false, new_edges, &overall_size, true);
1483 lookup_recursive_calls (node, curr->callee, &heap);
1484 n++;
1487 if (!heap.empty () && dump_file)
1488 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1490 if (!master_clone)
1491 return false;
1493 if (dump_file)
1494 fprintf (dump_file,
1495 "\n Inlined %i times, "
1496 "body grown from size %i to %i, time %i to %i\n", n,
1497 inline_summaries->get (master_clone)->size, inline_summaries->get (node)->size,
1498 inline_summaries->get (master_clone)->time, inline_summaries->get (node)->time);
1500 /* Remove the master clone we used for inlining. We rely on the fact that
1501 clones inlined into the master clone get queued just before the master
1502 clone, so we don't need recursion. */
1503 for (node = symtab->first_function (); node != master_clone;
1504 node = next)
1506 next = symtab->next_function (node);
1507 if (node->global.inlined_to == master_clone)
1508 node->remove ();
1510 master_clone->remove ();
1511 return true;
1515 /* Given whole compilation unit estimate of INSNS, compute how large we can
1516 allow the unit to grow. */
1518 static int
1519 compute_max_insns (int insns)
1521 int max_insns = insns;
1522 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1523 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
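/* Do the percentage computation in 64-bit arithmetic to avoid overflow. */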
1525 return ((int64_t) max_insns
1526 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
1530 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1532 static void
1533 add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
1535 while (new_edges.length () > 0)
1537 struct cgraph_edge *edge = new_edges.pop ();
1539 gcc_assert (!edge->aux);
1540 if (edge->inline_failed
1541 && can_inline_edge_p (edge, true)
1542 && want_inline_small_function_p (edge, true))
1543 edge->aux = heap->insert (edge_badness (edge, false), edge);
1547 /* Remove EDGE from the fibheap. */
1549 static void
1550 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1552 if (e->aux)
1554 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
1555 e->aux = NULL;
1559 /* Return true if speculation of edge E seems useful.
1560 If ANTICIPATE_INLINING is true, be conservative and hope that E
1561 may get inlined. */
1563 bool
1564 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1566 enum availability avail;
1567 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail);
1568 struct cgraph_edge *direct, *indirect;
1569 struct ipa_ref *ref;
1571 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1573 if (!e->maybe_hot_p ())
1574 return false;
1576 /* See if IP optimizations found something potentially useful about the
1577 function. For now we look only for CONST/PURE flags. Almost everything
1578 else we propagate is useless. */
1579 if (avail >= AVAIL_AVAILABLE)
1581 int ecf_flags = flags_from_decl_or_type (target->decl);
1582 if (ecf_flags & ECF_CONST)
1584 e->speculative_call_info (direct, indirect, ref);
1585 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1586 return true;
1588 else if (ecf_flags & ECF_PURE)
1590 e->speculative_call_info (direct, indirect, ref);
1591 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1592 return true;
1595 /* If we did not manage to inline the function nor redirect
1596 to an ipa-cp clone (these are seen by having the local flag set),
1597 it is probably pointless to inline it unless the hardware is missing an
1598 indirect call predictor. */
1599 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1600 return false;
1601 /* For overwritable targets there is not much to do. */
1602 if (e->inline_failed && !can_inline_edge_p (e, false, true))
1603 return false;
1604 /* OK, speculation seems interesting. */
1605 return true;
1608 /* We know that EDGE is not going to be inlined.
1609 See if we can remove speculation. */
1611 static void
1612 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1614 if (edge->speculative && !speculation_useful_p (edge, false))
1616 struct cgraph_node *node = edge->caller;
1617 struct cgraph_node *where = node->global.inlined_to
1618 ? node->global.inlined_to : node;
1619 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1621 spec_rem += edge->count;
1622 edge->resolve_speculation ();
1623 reset_edge_caches (where);
1624 inline_update_overall_summary (where);
1625 update_caller_keys (edge_heap, where,
1626 updated_nodes, NULL);
1627 update_callee_keys (edge_heap, where,
1628 updated_nodes);
1629 BITMAP_FREE (updated_nodes);
1633 /* Return true if NODE should be accounted for in the overall size estimate.
1634 Skip all nodes optimized for size so we can measure the growth of the hot
1635 part of the program regardless of the padding. */
1637 bool
1638 inline_account_function_p (struct cgraph_node *node)
1640 return (!DECL_EXTERNAL (node->decl)
1641 && !opt_for_fn (node->decl, optimize_size)
1642 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
1645 /* We use a greedy algorithm for inlining of small functions:
1646 all inline candidates are put into a prioritized heap ordered by
1647 increasing badness.
1649 The inlining of small functions is bounded by unit growth parameters. */
1651 static void
1652 inline_small_functions (void)
1654 struct cgraph_node *node;
1655 struct cgraph_edge *edge;
1656 edge_heap_t edge_heap (sreal::min ());
1657 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1658 int min_size, max_size;
1659 auto_vec<cgraph_edge *> new_indirect_edges;
1660 int initial_size = 0;
1661 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1662 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1663 new_indirect_edges.create (8);
1665 edge_removal_hook_holder
1666 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1668 /* Compute overall unit size and other global parameters used by badness
1669 metrics. */
1671 max_count = 0;
1672 ipa_reduced_postorder (order, true, true, NULL);
1673 free (order);
1675 FOR_EACH_DEFINED_FUNCTION (node)
1676 if (!node->global.inlined_to)
1678 if (!node->alias && node->analyzed
1679 && (node->has_gimple_body_p () || node->thunk.thunk_p))
1681 struct inline_summary *info = inline_summaries->get (node);
1682 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1684 /* Do not account external functions, they will be optimized out
1685 if not inlined. Also only count the non-cold portion of program. */
1686 if (inline_account_function_p (node))
1687 initial_size += info->size;
1688 info->growth = estimate_growth (node);
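/* Functions that are part of a cycle in the reduced callgraph get a shared,
   non-zero scc_no so calls within the same strongly connected component can
   be recognized later. */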
1689 if (dfs && dfs->next_cycle)
1691 struct cgraph_node *n2;
1692 int id = dfs->scc_no + 1;
1693 for (n2 = node; n2;
1694 n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle)
1696 struct inline_summary *info2 = inline_summaries->get (n2);
1697 if (info2->scc_no)
1698 break;
1699 info2->scc_no = id;
1704 for (edge = node->callers; edge; edge = edge->next_caller)
1705 if (max_count < edge->count)
1706 max_count = edge->count;
1708 ipa_free_postorder_info ();
1709 initialize_growth_caches ();
1711 if (dump_file)
1712 fprintf (dump_file,
1713 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1714 initial_size);
1716 overall_size = initial_size;
1717 max_size = compute_max_insns (overall_size);
1718 min_size = overall_size;
1720 /* Populate the heap with all edges we might inline. */
1722 FOR_EACH_DEFINED_FUNCTION (node)
1724 bool update = false;
1725 struct cgraph_edge *next = NULL;
1726 bool has_speculative = false;
1728 if (dump_file)
1729 fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
1730 node->name (), node->order);
1732 for (edge = node->callees; edge; edge = next)
1734 next = edge->next_callee;
1735 if (edge->inline_failed
1736 && !edge->aux
1737 && can_inline_edge_p (edge, true)
1738 && want_inline_small_function_p (edge, true)
1739 && edge->inline_failed)
1741 gcc_assert (!edge->aux);
1742 update_edge_key (&edge_heap, edge);
1744 if (edge->speculative)
1745 has_speculative = true;
1747 if (has_speculative)
1748 for (edge = node->callees; edge; edge = next)
1749 if (edge->speculative && !speculation_useful_p (edge,
1750 edge->aux != NULL))
1752 edge->resolve_speculation ();
1753 update = true;
1755 if (update)
1757 struct cgraph_node *where = node->global.inlined_to
1758 ? node->global.inlined_to : node;
1759 inline_update_overall_summary (where);
1760 reset_edge_caches (where);
1761 update_caller_keys (&edge_heap, where,
1762 updated_nodes, NULL);
1763 update_callee_keys (&edge_heap, where,
1764 updated_nodes);
1765 bitmap_clear (updated_nodes);
1769 gcc_assert (in_lto_p
1770 || !max_count
1771 || (profile_info && flag_branch_probabilities));
1773 while (!edge_heap.empty ())
1775 int old_size = overall_size;
1776 struct cgraph_node *where, *callee;
1777 sreal badness = edge_heap.min_key ();
1778 sreal current_badness;
1779 int growth;
1781 edge = edge_heap.extract_min ();
1782 gcc_assert (edge->aux);
1783 edge->aux = NULL;
1784 if (!edge->inline_failed || !edge->callee->analyzed)
1785 continue;
1787 #ifdef ENABLE_CHECKING
1788 /* Make sure the caches are kept consistent. */
1789 sreal cached_badness = edge_badness (edge, false);
1791 int old_size_est = estimate_edge_size (edge);
1792 int old_time_est = estimate_edge_time (edge);
1793 int old_hints_est = estimate_edge_hints (edge);
1795 reset_edge_growth_cache (edge);
1796 gcc_assert (old_size_est == estimate_edge_size (edge));
1797 gcc_assert (old_time_est == estimate_edge_time (edge));
1798 /* FIXME:
1800 gcc_assert (old_hints_est == estimate_edge_hints (edge));
1802 fails with profile feedback because some hints depend on the
1803 maybe_hot_edge_p predicate and, because the callee gets inlined into other
1804 calls, the edge may become cold.
1805 This ought to be fixed by computing relative probabilities
1806 for the given invocation, but that will be better done once the whole
1807 code is converted to sreals. Disable for now and revert to the "wrong"
1808 value so the enable/disable checking paths agree. */
1809 edge_growth_cache[edge->uid].hints = old_hints_est + 1;
1811 /* When updating the edge costs, we only decrease badness in the keys.
1812 Increases of badness are handled lazily; when we see a key with an
1813 out-of-date value on it, we re-insert it now. */
1814 current_badness = edge_badness (edge, false);
1815 /* Disable checking for profile because roundoff errors may cause slight
1816 deviations in the order. */
1817 gcc_assert (max_count || cached_badness == current_badness);
1818 gcc_assert (current_badness >= badness);
1819 #else
1820 current_badness = edge_badness (edge, false);
1821 #endif
1822 if (current_badness != badness)
1824 if (edge_heap.min () && current_badness > edge_heap.min_key ())
1826 edge->aux = edge_heap.insert (current_badness, edge);
1827 continue;
1829 else
1830 badness = current_badness;
1833 if (!can_inline_edge_p (edge, true))
1835 resolve_noninline_speculation (&edge_heap, edge);
1836 continue;
1839 callee = edge->callee->ultimate_alias_target ();
1840 growth = estimate_edge_growth (edge);
1841 if (dump_file)
1843 fprintf (dump_file,
1844 "\nConsidering %s/%i with %i size\n",
1845 callee->name (), callee->order,
1846 inline_summaries->get (callee)->size);
1847 fprintf (dump_file,
1848 " to be inlined into %s/%i in %s:%i\n"
1849 " Estimated badness is %f, frequency %.2f.\n",
1850 edge->caller->name (), edge->caller->order,
1851 edge->call_stmt
1852 && (LOCATION_LOCUS (gimple_location ((const_gimple)
1853 edge->call_stmt))
1854 > BUILTINS_LOCATION)
1855 ? gimple_filename ((const_gimple) edge->call_stmt)
1856 : "unknown",
1857 edge->call_stmt
1858 ? gimple_lineno ((const_gimple) edge->call_stmt)
1859 : -1,
1860 badness.to_double (),
1861 edge->frequency / (double)CGRAPH_FREQ_BASE);
1862 if (edge->count)
1863 fprintf (dump_file," Called %"PRId64"x\n",
1864 edge->count);
1865 if (dump_flags & TDF_DETAILS)
1866 edge_badness (edge, true);
1869 if (overall_size + growth > max_size
1870 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1872 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1873 report_inline_failed_reason (edge);
1874 resolve_noninline_speculation (&edge_heap, edge);
1875 continue;
1878 if (!want_inline_small_function_p (edge, true))
1880 resolve_noninline_speculation (&edge_heap, edge);
1881 continue;
1884 /* Heuristics for inlining small functions work poorly for
1885 recursive calls, where inlining has an effect similar to loop unrolling.
1886 When inlining such an edge seems profitable, leave the decision to the
1887 special-purpose recursive inliner. */
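/* A hypothetical example of what the special-purpose recursive inliner
   achieves (illustrative only): inlining the self-call below once is
   comparable to unrolling the recursion by one step.

     static int sum_to (int n) { return n <= 0 ? 0 : n + sum_to (n - 1); }

   After one level of recursive inlining the body effectively becomes

     return n <= 0 ? 0 : n + (n - 1 <= 0 ? 0 : (n - 1) + sum_to (n - 2));

   which is why the ordinary small-function heuristics, tuned for acyclic
   call chains, handle this case poorly.  */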
1888 if (edge->recursive_p ())
1890 where = edge->caller;
1891 if (where->global.inlined_to)
1892 where = where->global.inlined_to;
1893 if (!recursive_inlining (edge,
1894 opt_for_fn (edge->caller->decl,
1895 flag_indirect_inlining)
1896 ? &new_indirect_edges : NULL))
1898 edge->inline_failed = CIF_RECURSIVE_INLINING;
1899 resolve_noninline_speculation (&edge_heap, edge);
1900 continue;
1902 reset_edge_caches (where);
1903 /* Recursive inliner inlines all recursive calls of the function
1904 at once. Consequently we need to update all callee keys. */
1905 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
1906 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
1907 update_callee_keys (&edge_heap, where, updated_nodes);
1908 bitmap_clear (updated_nodes);
1910 else
1912 struct cgraph_node *outer_node = NULL;
1913 int depth = 0;
1915 /* Consider the case where a self-recursive function A is inlined
1916 into B. This is a desired optimization in some cases, since it
1917 leads to an effect similar to loop peeling and we might completely
1918 optimize out the recursive call. However we must be extra
1919 selective. */
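/* A hypothetical illustration of the peeling effect mentioned above: if the
   self-recursive function `walk' is inlined into its caller `top', the first
   level of the recursion is peeled off and, when the guard can be resolved
   at the call site, the remaining recursive call may disappear entirely.

     static void walk (int depth) { if (depth > 0) walk (depth - 1); }
     void top (void) { walk (1); }

   Inlining walk into top peels one iteration; constant propagation then
   proves the nested call dead.  The depth counter below keeps this from
   being repeated without bound.  */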
1921 where = edge->caller;
1922 while (where->global.inlined_to)
1924 if (where->decl == callee->decl)
1925 outer_node = where, depth++;
1926 where = where->callers->caller;
1928 if (outer_node
1929 && !want_inline_self_recursive_call_p (edge, outer_node,
1930 true, depth))
1932 edge->inline_failed
1933 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
1934 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1935 resolve_noninline_speculation (&edge_heap, edge);
1936 continue;
1938 else if (depth && dump_file)
1939 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
1941 gcc_checking_assert (!callee->global.inlined_to);
1942 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
1943 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
1945 reset_edge_caches (edge->callee->function_symbol ());
1947 update_callee_keys (&edge_heap, where, updated_nodes);
1949 where = edge->caller;
1950 if (where->global.inlined_to)
1951 where = where->global.inlined_to;
1953 /* Our profitability metric can depend on local properties
1954 such as the number of inlinable calls and the size of the function body.
1955 After inlining these properties might change for the function we
1956 inlined into (since its body size changed) and for the functions
1957 called by the function we inlined (since the number of their inlinable
1958 callers might change). */
1959 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
1960 /* The offline copy count has possibly changed; recompute if a profile is
1961 available. */
1962 if (max_count)
1964 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
1965 if (n != edge->callee && n->analyzed)
1966 update_callee_keys (&edge_heap, n, updated_nodes);
1968 bitmap_clear (updated_nodes);
1970 if (dump_file)
1972 fprintf (dump_file,
1973 " Inlined into %s which now has time %i and size %i,"
1974 "net change of %+i.\n",
1975 edge->caller->name (),
1976 inline_summaries->get (edge->caller)->time,
1977 inline_summaries->get (edge->caller)->size,
1978 overall_size - old_size);
1980 if (min_size > overall_size)
1982 min_size = overall_size;
1983 max_size = compute_max_insns (min_size);
1985 if (dump_file)
1986 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1990 free_growth_caches ();
1991 if (dump_file)
1992 fprintf (dump_file,
1993 "Unit growth for small function inlining: %i->%i (%i%%)\n",
1994 initial_size, overall_size,
1995 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
1996 BITMAP_FREE (updated_nodes);
1997 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
2000 /* Flatten NODE. Performed both during early inlining and
2001 at IPA inlining time. */
2003 static void
2004 flatten_function (struct cgraph_node *node, bool early)
2006 struct cgraph_edge *e;
2008 /* We shouldn't be called recursively when we are being processed. */
2009 gcc_assert (node->aux == NULL);
2011 node->aux = (void *) node;
2013 for (e = node->callees; e; e = e->next_callee)
2015 struct cgraph_node *orig_callee;
2016 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2018 /* Have we hit a cycle? It is time to give up. */
2019 if (callee->aux)
2021 if (dump_file)
2022 fprintf (dump_file,
2023 "Not inlining %s into %s to avoid cycle.\n",
2024 xstrdup_for_dump (callee->name ()),
2025 xstrdup_for_dump (e->caller->name ()));
2026 e->inline_failed = CIF_RECURSIVE_INLINING;
2027 continue;
2030 /* When the edge is already inlined, we just need to recurse into
2031 it in order to fully flatten the leaves. */
2032 if (!e->inline_failed)
2034 flatten_function (callee, early);
2035 continue;
2038 /* The flatten attribute needs to be processed during late inlining. For
2039 extra code quality, however, we also do flattening during early
2040 optimization. */
2041 if (!early
2042 ? !can_inline_edge_p (e, true)
2043 : !can_early_inline_edge_p (e))
2044 continue;
2046 if (e->recursive_p ())
2048 if (dump_file)
2049 fprintf (dump_file, "Not inlining: recursive call.\n");
2050 continue;
2053 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2054 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2056 if (dump_file)
2057 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
2058 continue;
2061 /* Inline the edge and flatten the inline clone. Avoid
2062 recursing through the original node if the node was cloned. */
2063 if (dump_file)
2064 fprintf (dump_file, " Inlining %s into %s.\n",
2065 xstrdup_for_dump (callee->name ()),
2066 xstrdup_for_dump (e->caller->name ()));
2067 orig_callee = callee;
2068 inline_call (e, true, NULL, NULL, false);
2069 if (e->callee != orig_callee)
2070 orig_callee->aux = (void *) node;
2071 flatten_function (e->callee, early);
2072 if (e->callee != orig_callee)
2073 orig_callee->aux = NULL;
2076 node->aux = NULL;
2077 if (!node->global.inlined_to)
2078 inline_update_overall_summary (node);
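/* A small example of what flattening means for users of the attribute
   (illustrative code, not taken from this file):

     static int parse_header (const char *p) { return p[0]; }
     static int parse_body (const char *p) { return p[1]; }

     __attribute__ ((flatten)) int
     parse (const char *p)
     {
       return parse_header (p) + parse_body (p);
     }

   Every call reachable from parse is inlined into it recursively, subject to
   the can_inline/can_early_inline checks above, regardless of the usual
   size-based heuristics.  */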
2081 /* Count the number of callers of NODE and store it into DATA (which
2082 points to an int). Worker for cgraph_for_node_and_aliases. */
2084 static bool
2085 sum_callers (struct cgraph_node *node, void *data)
2087 struct cgraph_edge *e;
2088 int *num_calls = (int *)data;
2090 for (e = node->callers; e; e = e->next_caller)
2091 (*num_calls)++;
2092 return false;
2095 /* Inline NODE to all callers. Worker for cgraph_for_node_and_aliases.
2096 DATA points to the number of calls originally found, so we avoid infinite
2097 recursion. */
2099 static bool
2100 inline_to_all_callers (struct cgraph_node *node, void *data)
2102 int *num_calls = (int *)data;
2103 bool callee_removed = false;
2105 while (node->callers && !node->global.inlined_to)
2107 struct cgraph_node *caller = node->callers->caller;
2109 if (!can_inline_edge_p (node->callers, true)
2110 || node->callers->recursive_p ())
2112 if (dump_file)
2113 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2114 *num_calls = 0;
2115 return false;
2118 if (dump_file)
2120 fprintf (dump_file,
2121 "\nInlining %s size %i.\n",
2122 node->name (),
2123 inline_summaries->get (node)->size);
2124 fprintf (dump_file,
2125 " Called once from %s %i insns.\n",
2126 node->callers->caller->name (),
2127 inline_summaries->get (node->callers->caller)->size);
2130 inline_call (node->callers, true, NULL, NULL, true, &callee_removed);
2131 if (dump_file)
2132 fprintf (dump_file,
2133 " Inlined into %s which now has %i size\n",
2134 caller->name (),
2135 inline_summaries->get (caller)->size);
2136 if (!(*num_calls)--)
2138 if (dump_file)
2139 fprintf (dump_file, "New calls found; giving up.\n");
2140 return callee_removed;
2142 if (callee_removed)
2143 return true;
2145 return false;
2148 /* Output overall time estimate. */
2149 static void
2150 dump_overall_stats (void)
2152 int64_t sum_weighted = 0, sum = 0;
2153 struct cgraph_node *node;
2155 FOR_EACH_DEFINED_FUNCTION (node)
2156 if (!node->global.inlined_to
2157 && !node->alias)
2159 int time = inline_summaries->get (node)->time;
2160 sum += time;
2161 sum_weighted += time * node->count;
2163 fprintf (dump_file, "Overall time estimate: "
2164 "%"PRId64" weighted by profile: "
2165 "%"PRId64"\n", sum, sum_weighted);
2168 /* Output some useful stats about inlining. */
2170 static void
2171 dump_inline_stats (void)
2173 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2174 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2175 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2176 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2177 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2178 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2179 int64_t reason[CIF_N_REASONS][3];
2180 int i;
2181 struct cgraph_node *node;
2183 memset (reason, 0, sizeof (reason));
2184 FOR_EACH_DEFINED_FUNCTION (node)
2186 struct cgraph_edge *e;
2187 for (e = node->callees; e; e = e->next_callee)
2189 if (e->inline_failed)
2191 reason[(int) e->inline_failed][0] += e->count;
2192 reason[(int) e->inline_failed][1] += e->frequency;
2193 reason[(int) e->inline_failed][2] ++;
2194 if (DECL_VIRTUAL_P (e->callee->decl))
2196 if (e->indirect_inlining_edge)
2197 noninlined_virt_indir_cnt += e->count;
2198 else
2199 noninlined_virt_cnt += e->count;
2201 else
2203 if (e->indirect_inlining_edge)
2204 noninlined_indir_cnt += e->count;
2205 else
2206 noninlined_cnt += e->count;
2209 else
2211 if (e->speculative)
2213 if (DECL_VIRTUAL_P (e->callee->decl))
2214 inlined_speculative_ply += e->count;
2215 else
2216 inlined_speculative += e->count;
2218 else if (DECL_VIRTUAL_P (e->callee->decl))
2220 if (e->indirect_inlining_edge)
2221 inlined_virt_indir_cnt += e->count;
2222 else
2223 inlined_virt_cnt += e->count;
2225 else
2227 if (e->indirect_inlining_edge)
2228 inlined_indir_cnt += e->count;
2229 else
2230 inlined_cnt += e->count;
2234 for (e = node->indirect_calls; e; e = e->next_callee)
2235 if (e->indirect_info->polymorphic)
2236 indirect_poly_cnt += e->count;
2237 else
2238 indirect_cnt += e->count;
2240 if (max_count)
2242 fprintf (dump_file,
2243 "Inlined %"PRId64 " + speculative "
2244 "%"PRId64 " + speculative polymorphic "
2245 "%"PRId64 " + previously indirect "
2246 "%"PRId64 " + virtual "
2247 "%"PRId64 " + virtual and previously indirect "
2248 "%"PRId64 "\n" "Not inlined "
2249 "%"PRId64 " + previously indirect "
2250 "%"PRId64 " + virtual "
2251 "%"PRId64 " + virtual and previously indirect "
2252 "%"PRId64 " + stil indirect "
2253 "%"PRId64 " + still indirect polymorphic "
2254 "%"PRId64 "\n", inlined_cnt,
2255 inlined_speculative, inlined_speculative_ply,
2256 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2257 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2258 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2259 fprintf (dump_file,
2260 "Removed speculations %"PRId64 "\n",
2261 spec_rem);
2263 dump_overall_stats ();
2264 fprintf (dump_file, "\nWhy inlining failed?\n");
2265 for (i = 0; i < CIF_N_REASONS; i++)
2266 if (reason[i][2])
2267 fprintf (dump_file, "%-50s: %8i calls, %8i freq, %"PRId64" count\n",
2268 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2269 (int) reason[i][2], (int) reason[i][1], reason[i][0]);
2272 /* Decide on the inlining. We do so in topological order to avoid
2273 the expense of updating data structures. */
2275 static unsigned int
2276 ipa_inline (void)
2278 struct cgraph_node *node;
2279 int nnodes;
2280 struct cgraph_node **order;
2281 int i;
2282 int cold;
2283 bool remove_functions = false;
2285 if (!optimize)
2286 return 0;
2288 cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
2289 percent_rec = (sreal) 1 / (sreal) 100;
2291 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2293 if (in_lto_p && optimize)
2294 ipa_update_after_lto_read ();
2296 if (dump_file)
2297 dump_inline_summaries (dump_file);
2299 nnodes = ipa_reverse_postorder (order);
2301 FOR_EACH_FUNCTION (node)
2303 node->aux = 0;
2305 /* Recompute the default reasons for inlining because they may have
2306 changed during merging. */
2307 if (in_lto_p)
2309 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2311 gcc_assert (e->inline_failed);
2312 initialize_inline_failed (e);
2314 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2315 initialize_inline_failed (e);
2319 if (dump_file)
2320 fprintf (dump_file, "\nFlattening functions:\n");
2322 /* In the first pass handle functions to be flattened. Do this with
2323 a priority so none of our later choices will make this impossible. */
2324 for (i = nnodes - 1; i >= 0; i--)
2326 node = order[i];
2328 /* Handle nodes to be flattened.
2329 Ideally when processing callees we stop inlining at the
2330 entry of cycles, possibly cloning that entry point and
2331 trying to flatten it, turning it into a self-recursive
2332 function. */
2333 if (lookup_attribute ("flatten",
2334 DECL_ATTRIBUTES (node->decl)) != NULL)
2336 if (dump_file)
2337 fprintf (dump_file,
2338 "Flattening %s\n", node->name ());
2339 flatten_function (node, false);
2342 if (dump_file)
2343 dump_overall_stats ();
2345 inline_small_functions ();
2347 gcc_assert (symtab->state == IPA_SSA);
2348 symtab->state = IPA_SSA_AFTER_INLINING;
2349 /* Do the first after-inlining removal. We want to remove all "stale" extern
2350 inline functions and virtual functions so we really know what is called
2351 once. */
2352 symtab->remove_unreachable_nodes (dump_file);
2353 free (order);
2355 /* Inline functions with the property that after inlining into all callers
2356 the code size will shrink because the out-of-line copy is eliminated.
2357 We do this regardless of the callee size as long as function growth limits
2358 are met. */
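/* A hypothetical example of the situation targeted here: a static function
   whose every call site is known and whose body is small enough that
   duplicating it is cheaper than keeping the out-of-line copy plus the
   calls.

     static int clamp (int v) { return v < 0 ? 0 : v; }
     int f (int v) { return clamp (v) + 1; }
     int g (int v) { return clamp (v) * 2; }

   After clamp is inlined into both f and g its offline copy becomes
   unreachable and is removed, so the unit can shrink overall even though two
   copies of the body were emitted inline.  */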
2359 if (dump_file)
2360 fprintf (dump_file,
2361 "\nDeciding on functions to be inlined into all callers and "
2362 "removing useless speculations:\n");
2364 /* Inlining one function called once has a good chance of preventing
2365 the inlining of another function into the same callee. Ideally we should
2366 work in priority order, but inlining hot functions first is probably
2367 a good cut without the extra pain of maintaining the queue.
2369 ??? This does not really fit the bill perfectly: inlining a function
2370 into a callee often leads to better optimization of the callee due to
2371 increased context for optimization.
2372 For example, if the main() function calls a function that outputs help
2373 and then a function that does the main optimization, we should inline
2374 the second with priority even if both calls are cold by themselves.
2376 We probably want to implement a new predicate replacing our use of
2377 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2378 to be hot. */
2379 for (cold = 0; cold <= 1; cold ++)
2381 FOR_EACH_DEFINED_FUNCTION (node)
2383 struct cgraph_edge *edge, *next;
2384 bool update = false;
2386 for (edge = node->callees; edge; edge = next)
2388 next = edge->next_callee;
2389 if (edge->speculative && !speculation_useful_p (edge, false))
2391 edge->resolve_speculation ();
2392 spec_rem += edge->count;
2393 update = true;
2394 remove_functions = true;
2397 if (update)
2399 struct cgraph_node *where = node->global.inlined_to
2400 ? node->global.inlined_to : node;
2401 reset_edge_caches (where);
2402 inline_update_overall_summary (where);
2404 if (want_inline_function_to_all_callers_p (node, cold))
2406 int num_calls = 0;
2407 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2408 true);
2409 while (node->call_for_symbol_and_aliases
2410 (inline_to_all_callers, &num_calls, true))
2412 remove_functions = true;
2417 /* Free ipa-prop structures if they are no longer needed. */
2418 if (optimize)
2419 ipa_free_all_structures_after_iinln ();
2421 if (dump_file)
2423 fprintf (dump_file,
2424 "\nInlined %i calls, eliminated %i functions\n\n",
2425 ncalls_inlined, nfunctions_inlined);
2426 dump_inline_stats ();
2429 if (dump_file)
2430 dump_inline_summaries (dump_file);
2431 /* In WPA we use inline summaries for the partitioning process. */
2432 if (!flag_wpa)
2433 inline_free_summary ();
2434 return remove_functions ? TODO_remove_functions : 0;
2437 /* Inline always-inline function calls in NODE. */
2439 static bool
2440 inline_always_inline_functions (struct cgraph_node *node)
2442 struct cgraph_edge *e;
2443 bool inlined = false;
2445 for (e = node->callees; e; e = e->next_callee)
2447 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2448 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2449 continue;
2451 if (e->recursive_p ())
2453 if (dump_file)
2454 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2455 e->callee->name ());
2456 e->inline_failed = CIF_RECURSIVE_INLINING;
2457 continue;
2460 if (!can_early_inline_edge_p (e))
2462 /* Set inlined to true if the callee is marked "always_inline" but
2463 is not inlinable. This will allow flagging an error later in
2464 expand_call_inline in tree-inline.c. */
2465 if (lookup_attribute ("always_inline",
2466 DECL_ATTRIBUTES (callee->decl)) != NULL)
2467 inlined = true;
2468 continue;
2471 if (dump_file)
2472 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2473 xstrdup_for_dump (e->callee->name ()),
2474 xstrdup_for_dump (e->caller->name ()));
2475 inline_call (e, true, NULL, NULL, false);
2476 inlined = true;
2478 if (inlined)
2479 inline_update_overall_summary (node);
2481 return inlined;
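/* For reference, the kind of callee handled here (an illustrative example,
   not from this file):

     static inline unsigned int __attribute__ ((always_inline))
     align_up (unsigned int x, unsigned int a)
     {
       return (x + a - 1) & ~(a - 1);   // a is assumed to be a power of two
     }

   Calls to align_up are inlined even when not optimizing and even when the
   usual limits would reject them; only recursion and genuinely uninlinable
   edges are skipped, and the latter are flagged so an error can be reported
   later.  */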
2484 /* Decide on the inlining. We do so in topological order to avoid
2485 the expense of updating data structures. */
2487 static bool
2488 early_inline_small_functions (struct cgraph_node *node)
2490 struct cgraph_edge *e;
2491 bool inlined = false;
2493 for (e = node->callees; e; e = e->next_callee)
2495 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2496 if (!inline_summaries->get (callee)->inlinable
2497 || !e->inline_failed)
2498 continue;
2500 /* Do not consider functions not declared inline. */
2501 if (!DECL_DECLARED_INLINE_P (callee->decl)
2502 && !opt_for_fn (node->decl, flag_inline_small_functions)
2503 && !opt_for_fn (node->decl, flag_inline_functions))
2504 continue;
2506 if (dump_file)
2507 fprintf (dump_file, "Considering inline candidate %s.\n",
2508 callee->name ());
2510 if (!can_early_inline_edge_p (e))
2511 continue;
2513 if (e->recursive_p ())
2515 if (dump_file)
2516 fprintf (dump_file, " Not inlining: recursive call.\n");
2517 continue;
2520 if (!want_early_inline_function_p (e))
2521 continue;
2523 if (dump_file)
2524 fprintf (dump_file, " Inlining %s into %s.\n",
2525 xstrdup_for_dump (callee->name ()),
2526 xstrdup_for_dump (e->caller->name ()));
2527 inline_call (e, true, NULL, NULL, true);
2528 inlined = true;
2531 return inlined;
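/* An illustrative candidate for this early pass (example code, not from this
   file): a tiny accessor declared inline whose caller is already in SSA
   form.

     struct point { int x, y; };
     static inline int point_x (const struct point *p) { return p->x; }
     int norm1 (const struct point *p) { return point_x (p) + p->y; }

   Inlining point_x here is essentially free and exposes the field access to
   the local optimizers that run right after early inlining.  */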
2534 unsigned int
2535 early_inliner (function *fun)
2537 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2538 struct cgraph_edge *edge;
2539 unsigned int todo = 0;
2540 int iterations = 0;
2541 bool inlined = false;
2543 if (seen_error ())
2544 return 0;
2546 /* Do nothing if data structures for the IPA inliner are already computed.
2547 This happens when some pass decides to construct a new function and
2548 cgraph_add_new_function calls lowering passes and early optimization on
2549 it. This may confuse us when the early inliner decides to inline a call
2550 to a function clone, because function clones don't have a parameter list
2551 in ipa-prop matching their signature. */
2552 if (ipa_node_params_sum)
2553 return 0;
2555 #ifdef ENABLE_CHECKING
2556 node->verify ();
2557 #endif
2558 node->remove_all_references ();
2560 /* Rebuild this reference because it doesn't depend on the
2561 function's body and it is required to pass cgraph_node
2562 verification. */
2563 if (node->instrumented_version
2564 && !node->instrumentation_clone)
2565 node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
2567 /* Even when not optimizing or not inlining, inline always-inline
2568 functions. */
2569 inlined = inline_always_inline_functions (node);
2571 if (!optimize
2572 || flag_no_inline
2573 || !flag_early_inlining
2574 /* Never inline regular functions into always-inline functions
2575 during incremental inlining. This is unfortunate, as functions
2576 calling always-inline functions will get less optimized, but at the
2577 same time inlining functions that call an always-inline
2578 function into an always-inline function might introduce
2579 cycles of edges to be always inlined in the callgraph.
2581 We might want to be smarter and just avoid this type of inlining. */
2582 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2583 && lookup_attribute ("always_inline",
2584 DECL_ATTRIBUTES (node->decl))))
2586 else if (lookup_attribute ("flatten",
2587 DECL_ATTRIBUTES (node->decl)) != NULL)
2589 /* When the function is marked to be flattened, recursively inline
2590 all calls in it. */
2591 if (dump_file)
2592 fprintf (dump_file,
2593 "Flattening %s\n", node->name ());
2594 flatten_function (node, true);
2595 inlined = true;
2597 else
2599 /* If some always_inline functions were inlined, apply the changes.
2600 This way we will not account always-inline functions in growth limits
2601 and moreover we will inline calls from always-inline functions that we
2602 previously skipped because of the conditional above. */
2603 if (inlined)
2605 timevar_push (TV_INTEGRATION);
2606 todo |= optimize_inline_calls (current_function_decl);
2607 /* optimize_inline_calls call above might have introduced new
2608 statements that don't have inline parameters computed. */
2609 for (edge = node->callees; edge; edge = edge->next_callee)
2611 if (inline_edge_summary_vec.length () > (unsigned) edge->uid)
2613 struct inline_edge_summary *es = inline_edge_summary (edge);
2614 es->call_stmt_size
2615 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2616 es->call_stmt_time
2617 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2620 inline_update_overall_summary (node);
2621 inlined = false;
2622 timevar_pop (TV_INTEGRATION);
2624 /* We iterate incremental inlining to get trivial cases of indirect
2625 inlining. */
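/* A hypothetical illustration of the "trivial indirect inlining" the loop
   below is after:

     static int add1 (int x) { return x + 1; }
     static int apply (int (*fn) (int), int v) { return fn (v); }
     int use (int v) { return apply (add1, v); }

   Inlining apply into use turns the indirect call fn (v) into the direct
   call add1 (v), which only becomes an inline candidate on the next
   iteration.  */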
2626 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2627 && early_inline_small_functions (node))
2629 timevar_push (TV_INTEGRATION);
2630 todo |= optimize_inline_calls (current_function_decl);
2632 /* Technically we ought to recompute inline parameters so the new
2633 iteration of the early inliner works as expected. We however have the
2634 values approximately right and thus only need to update edge
2635 info that might be cleared out for newly discovered edges. */
2636 for (edge = node->callees; edge; edge = edge->next_callee)
2638 /* We have no summary for new bound store calls yet. */
2639 if (inline_edge_summary_vec.length () > (unsigned)edge->uid)
2641 struct inline_edge_summary *es = inline_edge_summary (edge);
2642 es->call_stmt_size
2643 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2644 es->call_stmt_time
2645 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2647 if (edge->callee->decl
2648 && !gimple_check_call_matching_types (
2649 edge->call_stmt, edge->callee->decl, false))
2650 edge->call_stmt_cannot_inline_p = true;
2652 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2653 inline_update_overall_summary (node);
2654 timevar_pop (TV_INTEGRATION);
2655 iterations++;
2656 inlined = false;
2658 if (dump_file)
2659 fprintf (dump_file, "Iterations: %i\n", iterations);
2662 if (inlined)
2664 timevar_push (TV_INTEGRATION);
2665 todo |= optimize_inline_calls (current_function_decl);
2666 timevar_pop (TV_INTEGRATION);
2669 fun->always_inline_functions_inlined = true;
2671 return todo;
2674 /* Do inlining of small functions. Doing so early helps profiling and other
2675 passes to be somewhat more effective and avoids some code duplication in
2676 the later real inlining pass for testcases with very many function calls. */
2678 namespace {
2680 const pass_data pass_data_early_inline =
2682 GIMPLE_PASS, /* type */
2683 "einline", /* name */
2684 OPTGROUP_INLINE, /* optinfo_flags */
2685 TV_EARLY_INLINING, /* tv_id */
2686 PROP_ssa, /* properties_required */
2687 0, /* properties_provided */
2688 0, /* properties_destroyed */
2689 0, /* todo_flags_start */
2690 0, /* todo_flags_finish */
2693 class pass_early_inline : public gimple_opt_pass
2695 public:
2696 pass_early_inline (gcc::context *ctxt)
2697 : gimple_opt_pass (pass_data_early_inline, ctxt)
2700 /* opt_pass methods: */
2701 virtual unsigned int execute (function *);
2703 }; // class pass_early_inline
2705 unsigned int
2706 pass_early_inline::execute (function *fun)
2708 return early_inliner (fun);
2711 } // anon namespace
2713 gimple_opt_pass *
2714 make_pass_early_inline (gcc::context *ctxt)
2716 return new pass_early_inline (ctxt);
2719 namespace {
2721 const pass_data pass_data_ipa_inline =
2723 IPA_PASS, /* type */
2724 "inline", /* name */
2725 OPTGROUP_INLINE, /* optinfo_flags */
2726 TV_IPA_INLINING, /* tv_id */
2727 0, /* properties_required */
2728 0, /* properties_provided */
2729 0, /* properties_destroyed */
2730 0, /* todo_flags_start */
2731 ( TODO_dump_symtab ), /* todo_flags_finish */
2734 class pass_ipa_inline : public ipa_opt_pass_d
2736 public:
2737 pass_ipa_inline (gcc::context *ctxt)
2738 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2739 inline_generate_summary, /* generate_summary */
2740 inline_write_summary, /* write_summary */
2741 inline_read_summary, /* read_summary */
2742 NULL, /* write_optimization_summary */
2743 NULL, /* read_optimization_summary */
2744 NULL, /* stmt_fixup */
2745 0, /* function_transform_todo_flags_start */
2746 inline_transform, /* function_transform */
2747 NULL) /* variable_transform */
2750 /* opt_pass methods: */
2751 virtual unsigned int execute (function *) { return ipa_inline (); }
2753 }; // class pass_ipa_inline
2755 } // anon namespace
2757 ipa_opt_pass_d *
2758 make_pass_ipa_inline (gcc::context *ctxt)
2760 return new pass_ipa_inline (ctxt);