/* Inlining decision heuristics.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, growth and
     so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   inlining heuristics

     The inliner itself is split into two passes:

     pass_early_inlining

       Simple local inlining pass inlining callees into the current function.
       This pass makes no use of whole unit analysis and thus it can only make
       very simple decisions based on local properties.

       The strength of the pass is that it is run in topological order
       (reverse postorder) on the callgraph.  Functions are converted into SSA
       form just before this pass and optimized subsequently.  As a result,
       the callees of the function seen by the early inliner have already been
       optimized and the results of early inlining add a lot of optimization
       opportunities for the local optimization.

       The pass handles the obvious inlining decisions within the compilation
       unit - inlining auto inline functions, inlining for size and
       flattening.

       The main strength of the pass is the ability to eliminate the
       abstraction penalty in C++ code (via a combination of inlining and
       early optimization) and thus improve the quality of analysis done by
       the real IPA optimizers.

       Because of the lack of whole unit knowledge, the pass cannot really
       make good code size/performance tradeoffs.  It however does very simple
       speculative inlining allowing code size to grow by
       EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
       the optimizations performed later are very likely to eliminate the
       cost.

     pass_ipa_inline

       This is the real inliner able to handle inlining with whole program
       knowledge.  It performs the following steps:

       1) inlining of small functions.  This is implemented by a greedy
       algorithm ordering all inlinable cgraph edges by their badness and
       inlining them in this order as long as the inline limits allow doing
       so.

       This heuristic is not very good at inlining recursive calls.  Recursive
       calls can be inlined with results similar to loop unrolling.  To do so,
       a special purpose recursive inliner is executed on the function when a
       recursive edge is met as a viable candidate.

       2) Unreachable functions are removed from the callgraph.  Inlining
       leads to devirtualization and other modification of the callgraph, so
       functions may become unreachable during the process.  Also functions
       declared as extern inline or virtual functions are removed, since after
       inlining we no longer need the offline bodies.

       3) Functions called once and not exported from the unit are inlined.
       This should almost always lead to a reduction of code size by
       eliminating the need for an offline copy of the function.  */
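/* As a small illustration of the "called once" case above (example code,
   not part of GCC itself): given

     static int scale (int x) { return x * 3 + 1; }
     int entry (int v) { return scale (v); }

   inlining SCALE into its single caller ENTRY lets the unreachable-function
   removal step delete the offline body of SCALE, so the unit shrinks even
   though the call site grew.  */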
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "trans-mem.h"
#include "calls.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "intl.h"
#include "tree-pass.h"
#include "coverage.h"
#include "rtl.h"
#include "bitmap.h"
#include "profile.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "except.h"
#include "target.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "auto-profile.h"
#include "cilk.h"
#include "builtins.h"
#include "fibonacci_heap.h"
#include "lto-streamer.h"
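/* Candidate call graph edges waiting to be inlined are kept in a Fibonacci
   heap keyed by edge badness (an sreal); smaller keys are extracted
   first.  */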
typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;

/* Statistics we collect about the inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
static gcov_type spec_rem;

/* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100.  */
static sreal cgraph_freq_base_rec, percent_rec;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee->ultimate_alias_target ();
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  inline_summary *info, *what_info, *outer_info = inline_summaries->get (to);

  /* Look for the function e->caller is inlined into.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      info = inline_summaries->get (to);
      if (limit < info->self_size)
        limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
        stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
        to = to->callers->caller;
      else
        break;
    }

  what_info = inline_summaries->get (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
                       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
                   + outer_info->estimated_self_stack_size
                   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
         inline call, we can inline, too.
         This bit overoptimistically assumes that we are good at stack
         packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
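/* A worked example of the limits above (illustrative parameter values): with
   --param large-function-growth at 100, a caller chain whose largest body is
   500 insns gives limit = 500 + 500 * 100 / 100 = 1000 insns.  Similarly, a
   caller-side stack limit of 256 bytes with --param stack-frame-growth of
   1000 allows up to 256 + 256 * 1000 / 100 = 2816 bytes of combined stack
   before CIF_LARGE_STACK_FRAME_GROWTH_LIMIT triggers.  */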
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    {
      fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
               xstrdup_for_dump (e->caller->name ()), e->caller->order,
               xstrdup_for_dump (e->callee->name ()), e->callee->order,
               cgraph_inline_failed_string (e->inline_failed));
      if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
           || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
          && e->caller->lto_file_data
          && e->callee->function_symbol ()->lto_file_data)
        {
          fprintf (dump_file, "  LTO objects: %s, %s\n",
                   e->caller->lto_file_data->file_name,
                   e->callee->function_symbol ()->lto_file_data->file_name);
        }
      if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
        cl_target_option_print_diff
          (dump_file, 2, target_opts_for_fn (e->caller->decl),
           target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
      if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
        cl_optimization_print_diff
          (dump_file, 2, opts_for_fn (e->caller->decl),
           opts_for_fn (e->callee->ultimate_alias_target ()->decl));
    }
}
/* Decide whether sanitizer-related attributes allow inlining.  */

static bool
sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
{
  /* Don't care if the sanitizer is disabled.  */
  if (!(flag_sanitize & SANITIZE_ADDRESS))
    return true;

  if (!caller || !callee)
    return true;

  return !!lookup_attribute ("no_sanitize_address",
                             DECL_ATTRIBUTES (caller)) ==
         !!lookup_attribute ("no_sanitize_address",
                             DECL_ATTRIBUTES (callee));
}
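/* For instance (example code, not from the GCC sources): with
   -fsanitize=address,

     __attribute__ ((no_sanitize_address))
     static int raw_read (int *p) { return *p; }
     int checked_read (int *p) { return raw_read (p); }

   RAW_READ is not inlined into CHECKED_READ, since the caller is
   instrumented while the callee's accesses must stay uninstrumented; the
   check above only compares presence of the attribute on both sides.  */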
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report,
                   bool disregard_limits = false)
{
  bool inlinable = true;
  enum availability avail;
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail);
  cgraph_node *caller = e->caller->global.inlined_to
                        ? e->caller->global.inlined_to : e->caller;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
  struct function *caller_fun = caller->get_fun ();
  struct function *callee_fun = callee ? callee->get_fun () : NULL;

  gcc_assert (e->inline_failed);

  if (!callee || !callee->definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (callee->calls_comdat_local)
    {
      e->inline_failed = CIF_USES_COMDAT_LOCAL;
      inlinable = false;
    }
  else if (!inline_summaries->get (callee)->inlinable
           || (caller_fun && fn_contains_cilk_spawn_p (caller_fun)))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  else if (avail <= AVAIL_INTERPOSABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  else if (e->call_stmt_cannot_inline_p)
    {
      if (e->inline_failed != CIF_FUNCTION_NOT_OPTIMIZED)
        e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
      inlinable = false;
    }
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (caller->decl)
           && DECL_FUNCTION_PERSONALITY (callee->decl)
           && (DECL_FUNCTION_PERSONALITY (caller->decl)
               != DECL_FUNCTION_PERSONALITY (callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->decl)
           && !is_tm_pure (caller->decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into cgraph node or mirror it in the inline summary.  */
  else if (callee_fun && callee_fun->can_throw_non_call_exceptions
           && !(caller_fun && caller_fun->can_throw_non_call_exceptions))
    {
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (caller->decl,
                                                callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  /* Don't inline a function with mismatched sanitization attributes.  */
  else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
    {
      e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
           && !disregard_limits
           && !lookup_attribute ("flatten",
                                 DECL_ATTRIBUTES (caller->decl))
           && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      /* There are some options that change IL semantics which means
         we cannot inline in these cases for correctness reasons.
         Not even for always_inline declared functions.  */
      /* Strictly speaking only when the callee contains signed integer
         math where overflow is undefined.  */
      if ((opt_for_fn (e->caller->decl, flag_strict_overflow)
           != opt_for_fn (e->callee->decl, flag_strict_overflow))
          || (opt_for_fn (e->caller->decl, flag_wrapv)
              != opt_for_fn (e->callee->decl, flag_wrapv))
          || (opt_for_fn (e->caller->decl, flag_trapv)
              != opt_for_fn (e->callee->decl, flag_trapv))
          /* Strictly speaking only when the callee contains memory
             accesses that are not using alias-set zero anyway.  */
          || (opt_for_fn (e->caller->decl, flag_strict_aliasing)
              != opt_for_fn (e->callee->decl, flag_strict_aliasing))
          /* Strictly speaking only when the callee uses FP math.  */
          || (opt_for_fn (e->caller->decl, flag_rounding_math)
              != opt_for_fn (e->callee->decl, flag_rounding_math))
          || (opt_for_fn (e->caller->decl, flag_trapping_math)
              != opt_for_fn (e->callee->decl, flag_trapping_math))
          || (opt_for_fn (e->caller->decl, flag_unsafe_math_optimizations)
              != opt_for_fn (e->callee->decl, flag_unsafe_math_optimizations))
          || (opt_for_fn (e->caller->decl, flag_finite_math_only)
              != opt_for_fn (e->callee->decl, flag_finite_math_only))
          || (opt_for_fn (e->caller->decl, flag_signaling_nans)
              != opt_for_fn (e->callee->decl, flag_signaling_nans))
          || (opt_for_fn (e->caller->decl, flag_cx_limited_range)
              != opt_for_fn (e->callee->decl, flag_cx_limited_range))
          || (opt_for_fn (e->caller->decl, flag_signed_zeros)
              != opt_for_fn (e->callee->decl, flag_signed_zeros))
          || (opt_for_fn (e->caller->decl, flag_associative_math)
              != opt_for_fn (e->callee->decl, flag_associative_math))
          || (opt_for_fn (e->caller->decl, flag_reciprocal_math)
              != opt_for_fn (e->callee->decl, flag_reciprocal_math))
          /* Strictly speaking only when the callee contains function
             calls that may end up setting errno.  */
          || (opt_for_fn (e->caller->decl, flag_errno_math)
              != opt_for_fn (e->callee->decl, flag_errno_math)))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* gcc.dg/pr43564.c.  Apply user-forced inline even at -O0.  */
      else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
               && lookup_attribute ("always_inline",
                                    DECL_ATTRIBUTES (callee->decl)))
        ;
      /* When the user added an attribute to the callee honor it.  */
      else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
               && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* If the mismatch is caused by merging two LTO units with different
         optimization flags we want to be a bit nicer.  However never inline
         if one of the functions is not optimized at all.  */
      else if (!opt_for_fn (callee->decl, optimize)
               || !opt_for_fn (caller->decl, optimize))
        {
          e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
          inlinable = false;
        }
      /* If the callee is optimized for size and the caller is not, allow
         inlining if code shrinks or we are within the MAX_INLINE_INSNS_SINGLE
         limit and the callee is declared inline (and thus likely a unified
         comdat).  This will allow the caller to run faster.  */
      else if (opt_for_fn (callee->decl, optimize_size)
               > opt_for_fn (caller->decl, optimize_size))
        {
          int growth = estimate_edge_growth (e);
          if (growth > 0
              && (!DECL_DECLARED_INLINE_P (callee->decl)
                  && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
                                    MAX_INLINE_INSNS_AUTO)))
            {
              e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
              inlinable = false;
            }
        }
      /* If the callee is more aggressively optimized for performance than
         the caller, we generally want to inline only cheap (runtime wise)
         functions.  */
      else if (opt_for_fn (callee->decl, optimize_size)
               < opt_for_fn (caller->decl, optimize_size)
               || (opt_for_fn (callee->decl, optimize)
                   >= opt_for_fn (caller->decl, optimize)))
        {
          if (estimate_edge_time (e)
              >= 20 + inline_edge_summary (e)->call_stmt_time)
            {
              e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
              inlinable = false;
            }
        }
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}
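/* For example (illustrative, not from the GCC testsuite): a callee such as

     __attribute__ ((optimize ("wrapv")))
     static int wrap_add (int a, int b) { return a + b; }

   called from a caller compiled without -fwrapv is rejected with
   CIF_OPTIMIZATION_MISMATCH by the flag comparisons above, since inlining
   it would change the signed-overflow semantics of the copied body.  */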
/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  /* The early inliner might get called at WPA stage when an IPA pass adds a
     new function.  In this case we cannot really do any early inlining
     because function bodies are missing.  */
  if (!gimple_has_body_p (callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by the early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
    {
      if (dump_file)
        fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true))
    return false;
  return true;
}
/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      num++;
  return num;
}
/* Return true if we are interested in early inlining the small function E
   calls.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  /* For AutoFDO, we need to make sure that before profile summary, all
     hot paths' IR look exactly the same as in the profiled binary.  As a
     result, in the einliner, we will disregard the size limit and inline
     those callsites that are:
       * inlined in the profiled binary, and
       * the cloned callee has enough samples to be considered "hot".  */
  else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      int n;

      if (growth <= 0)
        ;
      else if (!e->maybe_hot_p ()
               && growth > 0)
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "call is cold and code would grow by %i\n",
                     xstrdup_for_dump (e->caller->name ()),
                     e->caller->order,
                     xstrdup_for_dump (callee->name ()), callee->order,
                     growth);
          want_inline = false;
        }
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "growth %i exceeds --param early-inlining-insns\n",
                     xstrdup_for_dump (e->caller->name ()),
                     e->caller->order,
                     xstrdup_for_dump (callee->name ()), callee->order,
                     growth);
          want_inline = false;
        }
      else if ((n = num_calls (callee)) != 0
               && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
        {
          if (dump_file)
            fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
                     "growth %i exceeds --param early-inlining-insns "
                     "divided by number of calls\n",
                     xstrdup_for_dump (e->caller->name ()),
                     e->caller->order,
                     xstrdup_for_dump (callee->name ()), callee->order,
                     growth);
          want_inline = false;
        }
    }
  return want_inline;
}
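/* A worked example of the last rule above (with an assumed value of
   --param early-inlining-insns = 14): a callee whose inlining grows the
   caller by 4 insns and which itself contains 3 non-builtin calls is
   rejected, since 4 * (3 + 1) = 16 > 14; the heuristic charges the growth
   once for the callee body itself and once per call it still contains.  */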
/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

inline sreal
compute_uninlined_call_time (struct inline_summary *callee_info,
                             struct cgraph_edge *edge)
{
  sreal uninlined_call_time = (sreal)callee_info->time;
  cgraph_node *caller = (edge->caller->global.inlined_to
                         ? edge->caller->global.inlined_to
                         : edge->caller);

  if (edge->count && caller->count)
    uninlined_call_time *= (sreal)edge->count / caller->count;
  if (edge->frequency)
    uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
  else
    uninlined_call_time = uninlined_call_time >> 11;

  int caller_time = inline_summaries->get (caller)->time;
  return uninlined_call_time + caller_time;
}
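/* Note on the scaling above: edge->count / caller->count weights the callee
   time by how often the call executes per invocation of the caller, and
   cgraph_freq_base_rec * edge->frequency does the same with static frequency
   estimates.  A zero frequency means the call is predicted as (almost) never
   executed, so the time is shifted right by 11 bits, i.e. divided by 2048.
   compute_inlined_call_time below applies the same weighting.  */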
/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

inline sreal
compute_inlined_call_time (struct cgraph_edge *edge,
                           int edge_time)
{
  cgraph_node *caller = (edge->caller->global.inlined_to
                         ? edge->caller->global.inlined_to
                         : edge->caller);
  int caller_time = inline_summaries->get (caller)->time;
  sreal time = edge_time;

  if (edge->count && caller->count)
    time *= (sreal)edge->count / caller->count;
  if (edge->frequency)
    time *= cgraph_freq_base_rec * edge->frequency;
  else
    time = time >> 11;

  /* This calculation should match the one in ipa-inline-analysis.
     FIXME: Once ipa-inline-analysis is converted to sreal this can be
     simplified.  */
  time -= (sreal) ((gcov_type) edge->frequency
                   * inline_edge_summary (edge)->call_stmt_time
                   * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE)) / INLINE_TIME_SCALE;
  time += caller_time;
  if (time <= 0)
    time = ((sreal) 1) >> 8;
  gcc_checking_assert (time >= 0);
  return time;
}
/* Return true if the speedup for inlining E is bigger than
   PARAM_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  sreal time = compute_uninlined_call_time (inline_summaries->get (e->callee),
                                            e);
  sreal inlined_time = compute_inlined_call_time (e, estimate_edge_time (e));

  if (time - inlined_time
      > (sreal) time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)
        * percent_rec)
    return true;
  return false;
}
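/* Numerically (with an assumed --param inline-min-speedup of 10): if the
   combined caller+callee time without inlining is 200 units and the
   estimated time with inlining is 170, the saving of 30 exceeds
   200 * 10 / 100 = 20 units, so the speedup counts as big.  */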
/* Return true if we are interested in inlining the small function E calls.
   When REPORT is true, report the reason to the dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
           && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  /* Do a fast and conservative check if the function can be a good
     inline candidate.  At the moment we allow inline hints to
     promote non-inline functions to inline and we increase
     MAX_INLINE_INSNS_SINGLE 16-fold for inline functions.  */
  else if ((!DECL_DECLARED_INLINE_P (callee->decl)
            && (!e->count || !e->maybe_hot_p ()))
           && inline_summaries->get (callee)->min_size
              - inline_edge_summary (e)->call_stmt_size
              > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
    {
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
      want_inline = false;
    }
  else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
           && inline_summaries->get (callee)->min_size
              - inline_edge_summary (e)->call_stmt_size
              > 16 * MAX_INLINE_INSNS_SINGLE)
    {
      e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
                          ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
                          : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      inline_hints hints = estimate_edge_hints (e);
      bool big_speedup = big_speedup_p (e);

      if (growth <= 0)
        ;
      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
         hints suggest that inlining the given function is very
         profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
               && growth >= MAX_INLINE_INSNS_SINGLE
               && ((!big_speedup
                    && !(hints & (INLINE_HINT_indirect_call
                                  | INLINE_HINT_known_hot
                                  | INLINE_HINT_loop_iterations
                                  | INLINE_HINT_array_index
                                  | INLINE_HINT_loop_stride)))
                   || growth >= MAX_INLINE_INSNS_SINGLE * 16))
        {
          e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          want_inline = false;
        }
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !opt_for_fn (e->caller->decl, flag_inline_functions))
        {
          /* growth_likely_positive is expensive, always test it last.  */
          if (growth >= MAX_INLINE_INSNS_SINGLE
              || growth_likely_positive (callee, growth))
            {
              e->inline_failed = CIF_NOT_DECLARED_INLINED;
              want_inline = false;
            }
        }
      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
         Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
         inlining the given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
               && !big_speedup
               && !(hints & INLINE_HINT_known_hot)
               && growth >= ((hints & (INLINE_HINT_indirect_call
                                       | INLINE_HINT_loop_iterations
                                       | INLINE_HINT_array_index
                                       | INLINE_HINT_loop_stride))
                             ? MAX (MAX_INLINE_INSNS_AUTO,
                                    MAX_INLINE_INSNS_SINGLE)
                             : MAX_INLINE_INSNS_AUTO))
        {
          /* growth_likely_positive is expensive, always test it last.  */
          if (growth >= MAX_INLINE_INSNS_SINGLE
              || growth_likely_positive (callee, growth))
            {
              e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
              want_inline = false;
            }
        }
      /* If the call is cold, do not inline when the function body would
         grow.  */
      else if (!e->maybe_hot_p ()
               && (growth >= MAX_INLINE_INSNS_SINGLE
                   || growth_likely_positive (callee, growth)))
        {
          e->inline_failed = CIF_UNLIKELY_CALL;
          want_inline = false;
        }
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}
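/* To illustrate the hint-based limits (parameter values assumed for the
   example): with MAX_INLINE_INSNS_AUTO = 40 and MAX_INLINE_INSNS_SINGLE
   = 400, a non-"inline" callee with growth 60 is normally rejected
   (60 >= 40), but is still accepted when, say, a loop-iteration or
   indirect-call hint applies, because the limit is then upgraded to 400.  */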
/* EDGE is a self-recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just cause new recursive calls to appear.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
                                   struct cgraph_node *outer_node,
                                   bool peeling,
                                   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!edge->maybe_hot_p ())
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (max_count && !outer_node->count)
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!caller_freq)
    {
      reason = "function is inlined and unlikely";
      want_inline = false;
    }

  if (!want_inline)
    ;
  /* Inlining of a self-recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self-recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
                                         / max_depth);
      int i;
      for (i = 1; i < depth; i++)
        max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
      if (max_count
          && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
              >= max_prob))
        {
          reason = "profile of recursive call is too large";
          want_inline = false;
        }
      if (!max_count
          && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
              >= max_prob))
        {
          reason = "frequency of recursive call is too large";
          want_inline = false;
        }
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     the recursion depth is large.  We reduce function call overhead and
     increase chances that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame setup,
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self-recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (max_count
          && (edge->count * 100 / outer_node->count
              <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
        {
          reason = "profile of recursive call is too small";
          want_inline = false;
        }
      else if (!max_count
               && (edge->frequency * 100 / caller_freq
                   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
        {
          reason = "frequency of recursive call is too small";
          want_inline = false;
        }
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}
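/* A quick sanity check of the peeling bound above: with CGRAPH_FREQ_BASE
   of 1000 and max_depth of 8, max_prob = 1000 - (1000 + 7) / 8 = 875,
   i.e. a recursion probability of at most 0.875 per level.  A geometric
   series with ratio p has expected length 1 / (1 - p), so 0.875 gives an
   expected recursion depth of 8, matching max_depth.  */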
/* Return true when NODE has an uninlinable caller;
   set HAS_HOT_CALL if it has a hot call.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_callers (struct cgraph_node *node, void *has_hot_call)
{
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
        return true;
      if (!can_inline_edge_p (e, true))
        return true;
      if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
        *(bool *)has_hot_call = true;
    }
  return false;
}

/* If NODE has a caller, return true.  */

static bool
has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  if (node->callers)
    return true;
  return false;
}
/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  bool has_hot_call = false;

  /* Aliases get inlined along with the function they alias.  */
  if (node->alias)
    return false;
  /* Already inlined?  */
  if (node->global.inlined_to)
    return false;
  /* Does it have callers?  */
  if (!node->call_for_symbol_thunks_and_aliases (has_caller_p, NULL, true))
    return false;
  /* Inlining into all callers would increase size?  */
  if (estimate_growth (node) > 0)
    return false;
  /* All inlines must be possible.  */
  if (node->call_for_symbol_thunks_and_aliases (check_callers, &has_hot_call,
                                                true))
    return false;
  if (!cold && !has_hot_call)
    return false;
  return true;
}
/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable callers
   of the function or function body size.  */

static sreal
edge_badness (struct cgraph_edge *edge, bool dump)
{
  sreal badness;
  int growth, edge_time;
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  struct inline_summary *callee_info = inline_summaries->get (callee);
  inline_hints hints;
  cgraph_node *caller = (edge->caller->global.inlined_to
                         ? edge->caller->global.inlined_to
                         : edge->caller);

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  gcc_checking_assert (edge_time <= callee_info->time);
  gcc_checking_assert (growth <= callee_info->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s/%i -> %s/%i\n",
               xstrdup_for_dump (edge->caller->name ()),
               edge->caller->order,
               xstrdup_for_dump (callee->name ()),
               edge->callee->order);
      fprintf (dump_file, "      size growth %i, time %i ",
               growth,
               edge_time);
      dump_inline_hints (dump_file, hints);
      if (big_speedup_p (edge))
        fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
      if (dump)
        fprintf (dump_file, "      %f: Growth %d <= 0\n", badness.to_double (),
                 growth);
    }
  /* Inlining into EXTERNAL functions is not going to change anything unless
     they are themselves inlined.  */
  else if (DECL_EXTERNAL (caller->decl))
    {
      if (dump)
        fprintf (dump_file, "      max: function is external\n");
      return sreal::max ();
    }
  /* When profile is available, compute badness as:

                    time_saved * caller_count
     goodness = ---------------------------------
                 growth_of_caller * overall_growth

     badness = - goodness

     Again use a negative value to make calls with profile appear hotter
     than calls without.  */
  else if (opt_for_fn (caller->decl, flag_guess_branch_prob) || caller->count)
    {
      sreal numerator, denominator;

      numerator = (compute_uninlined_call_time (callee_info, edge)
                   - compute_inlined_call_time (edge, edge_time));
      if (numerator == 0)
        numerator = ((sreal) 1 >> 8);
      if (caller->count)
        numerator *= caller->count;
      else if (opt_for_fn (caller->decl, flag_branch_probabilities))
        numerator = numerator >> 11;
      denominator = growth;
      if (callee_info->growth > 0)
        denominator *= callee_info->growth;

      badness = - numerator / denominator;

      if (dump)
        {
          fprintf (dump_file,
                   "      %f: guessed profile. frequency %f, count %"PRId64
                   " caller count %"PRId64
                   " time w/o inlining %f, time w inlining %f"
                   " overall growth %i (current) %i (original)\n",
                   badness.to_double (),
                   (double)edge->frequency / CGRAPH_FREQ_BASE,
                   edge->count, caller->count,
                   compute_uninlined_call_time (callee_info,
                                                edge).to_double (),
                   compute_inlined_call_time (edge, edge_time).to_double (),
                   estimate_growth (callee),
                   callee_info->growth);
        }
    }
  /* When the function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = growth;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness = badness >> nest;
      else
        badness = badness << nest;
      if (dump)
        fprintf (dump_file, "      %f: no profile. nest %i\n",
                 badness.to_double (), nest);
    }
  gcc_checking_assert (badness != 0);

  if (edge->recursive_p ())
    badness = badness.shift (badness > 0 ? 4 : -4);
  if ((hints & (INLINE_HINT_indirect_call
                | INLINE_HINT_loop_iterations
                | INLINE_HINT_array_index
                | INLINE_HINT_loop_stride))
      || callee_info->growth <= 0)
    badness = badness.shift (badness > 0 ? -2 : 2);
  if (hints & (INLINE_HINT_same_scc))
    badness = badness.shift (badness > 0 ? 3 : -3);
  else if (hints & (INLINE_HINT_in_scc))
    badness = badness.shift (badness > 0 ? 2 : -2);
  else if (hints & (INLINE_HINT_cross_module))
    badness = badness.shift (badness > 0 ? 1 : -1);
  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    badness = badness.shift (badness > 0 ? -4 : 4);
  else if ((hints & INLINE_HINT_declared_inline))
    badness = badness.shift (badness > 0 ? -3 : 3);
  if (dump)
    fprintf (dump_file, "      Adjusted by hints %f\n", badness.to_double ());
  return badness;
}
/* Recompute badness of EDGE and update its key in HEAP if needed.  */

static inline void
update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
{
  sreal badness = edge_badness (edge, false);
  if (edge->aux)
    {
      edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
      gcc_checking_assert (n->get_data () == edge);

      /* fibonacci_heap::replace_key does busy updating of the
         heap that is unnecessarily expensive.
         We do lazy increases: after extracting the minimum, if the key
         turns out to be out of date, it is re-inserted into the heap
         with the correct value.  */
      if (badness < n->get_key ())
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file,
                       "  decreasing badness %s/%i -> %s/%i, %f"
                       " to %f\n",
                       xstrdup_for_dump (edge->caller->name ()),
                       edge->caller->order,
                       xstrdup_for_dump (edge->callee->name ()),
                       edge->callee->order,
                       n->get_key ().to_double (),
                       badness.to_double ());
            }
          heap->decrease_key (n, badness);
        }
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "  enqueuing call %s/%i -> %s/%i, badness %f\n",
                   xstrdup_for_dump (edge->caller->name ()),
                   edge->caller->order,
                   xstrdup_for_dump (edge->callee->name ()),
                   edge->callee->order,
                   badness.to_double ());
        }
      edge->aux = heap->insert (badness, edge);
    }
}
/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need resetting
   because a better context may be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);

  FOR_EACH_ALIAS (where, ref)
    reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        if (e->inline_failed)
          reset_edge_growth_cache (e);
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}
/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES track nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
   it is inlinable.  Otherwise check all edges.  */

static void
update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
                    bitmap updated_nodes,
                    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  struct ipa_ref *ref;

  if ((!node->alias && !inline_summaries->get (node)->inlinable)
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;

  FOR_EACH_ALIAS (node, ref)
    {
      struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
      update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        if (!check_inlinablity_for
            || check_inlinablity_for == edge)
          {
            if (can_inline_edge_p (edge, false)
                && want_inline_small_function_p (edge, false))
              update_edge_key (heap, edge);
            else if (edge->aux)
              {
                report_inline_failed_reason (edge);
                heap->delete_node ((edge_heap_node_t *) edge->aux);
                edge->aux = NULL;
              }
          }
        else if (edge->aux)
          update_edge_key (heap, edge);
      }
}
/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */

static void
update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        enum availability avail;
        struct cgraph_node *callee;
        /* We do not reset the callee growth cache here.  Since we added a
           new call, growth could have just increased and consequently the
           badness metric doesn't need updating.  */
        if (e->inline_failed
            && (callee = e->callee->ultimate_alias_target (&avail))
            && inline_summaries->get (callee)->inlinable
            && avail >= AVAIL_AVAILABLE
            && !bitmap_bit_p (updated_nodes, callee->uid))
          {
            if (can_inline_edge_p (e, false)
                && want_inline_small_function_p (e, false))
              update_edge_key (heap, e);
            else if (e->aux)
              {
                report_inline_failed_reason (e);
                heap->delete_node ((edge_heap_node_t *) e->aux);
                e->aux = NULL;
              }
          }
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}
/* Enqueue all recursive calls from NODE into the priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        edge_heap_t *heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
        || (e->callee->ultimate_alias_target (&avail) == node
            && avail > AVAIL_INTERPOSABLE))
      {
        /* When profile feedback is available, prioritize by expected number
           of calls.  */
        heap->insert (!max_count ? -e->frequency
                      : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                      e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
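/* Note on the key above: the heap is a min-heap, so negating the frequency
   (or the profile count) makes the most frequently executed recursive call
   come out first.  Dividing by (max_count + 2^24 - 1) / 2^24 rescales
   arbitrary profile counts into roughly the 0..2^24 range before
   negation.  */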
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
                    vec<cgraph_edge *> *new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  edge_heap_t heap (sreal::min ());
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  lookup_recursive_calls (node, node, &heap);
  if (heap.empty ())
    return false;

  if (dump_file)
    fprintf (dump_file,
             "  Performing recursive inlining on %s\n",
             node->name ());

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!heap.empty ())
    {
      struct cgraph_edge *curr = heap.extract_min ();
      struct cgraph_node *cnode, *dest = curr->callee;

      if (!can_inline_edge_p (curr, true))
        continue;

      /* MASTER_CLONE is produced in the case we have already started
         modifying the function.  Be sure to redirect the edge to the
         original body before estimating growths, otherwise we will be
         seeing growths after inlining the already modified body.  */
      if (master_clone)
        {
          curr->redirect_callee (master_clone);
          reset_edge_growth_cache (curr);
        }

      if (estimate_size_after_inlining (node, curr) > limit)
        {
          curr->redirect_callee (dest);
          reset_edge_growth_cache (curr);
          break;
        }

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl
            == curr->callee->ultimate_alias_target ()->decl)
          depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
        {
          curr->redirect_callee (dest);
          reset_edge_growth_cache (curr);
          continue;
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      if (!master_clone)
        {
          /* We need the original clone to copy around.  */
          master_clone = node->create_clone (node->decl, node->count,
                                             CGRAPH_FREQ_BASE, false, vNULL,
                                             true, NULL, NULL);
          for (e = master_clone->callees; e; e = e->next_callee)
            if (!e->inline_failed)
              clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
          curr->redirect_callee (master_clone);
          reset_edge_growth_cache (curr);
        }

      inline_call (curr, false, new_edges, &overall_size, true);
      lookup_recursive_calls (node, curr->callee, &heap);
      n++;
    }

  if (!heap.empty () && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  if (!master_clone)
    return false;

  if (dump_file)
    fprintf (dump_file,
             "\n   Inlined %i times, "
             "body grown from size %i to %i, time %i to %i\n", n,
             inline_summaries->get (master_clone)->size,
             inline_summaries->get (node)->size,
             inline_summaries->get (master_clone)->time,
             inline_summaries->get (node)->time);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
  for (node = symtab->first_function (); node != master_clone;
       node = next)
    {
      next = symtab->next_function (node);
      if (node->global.inlined_to == master_clone)
        node->remove ();
    }
  master_clone->remove ();
  return true;
}
/* Given a whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((int64_t) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
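/* For example (parameter values assumed): a unit of 50000 insns with
   --param inline-unit-growth of 20 may grow to 50000 * (100 + 20) / 100
   = 60000 insns; units smaller than --param large-unit-insns are first
   rounded up to that size, so tiny units still get some headroom.  */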
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
{
  while (new_edges.length () > 0)
    {
      struct cgraph_edge *edge = new_edges.pop ();

      gcc_assert (!edge->aux);
      if (edge->inline_failed
          && can_inline_edge_p (edge, true)
          && want_inline_small_function_p (edge, true))
        edge->aux = heap->insert (edge_badness (edge, false), edge);
    }
}

/* Remove EDGE from the fibheap.  */

static void
heap_edge_removal_hook (struct cgraph_edge *e, void *data)
{
  if (e->aux)
    {
      ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
      e->aux = NULL;
    }
}
/* Return true if speculation of edge E seems useful.
   If ANTICIPATE_INLINING is true, be conservative and hope that E
   may get inlined.  */

bool
speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
{
  enum availability avail;
  struct cgraph_node *target = e->callee->ultimate_alias_target (&avail);
  struct cgraph_edge *direct, *indirect;
  struct ipa_ref *ref;

  gcc_assert (e->speculative && !e->indirect_unknown_callee);

  if (!e->maybe_hot_p ())
    return false;

  /* See if IP optimizations found something potentially useful about the
     function.  For now we look only for CONST/PURE flags.  Almost everything
     else we propagate is useless.  */
  if (avail >= AVAIL_AVAILABLE)
    {
      int ecf_flags = flags_from_decl_or_type (target->decl);
      if (ecf_flags & ECF_CONST)
        {
          e->speculative_call_info (direct, indirect, ref);
          if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
            return true;
        }
      else if (ecf_flags & ECF_PURE)
        {
          e->speculative_call_info (direct, indirect, ref);
          if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
            return true;
        }
    }
  /* If we did not manage to inline the function nor redirect
     to an ipa-cp clone (that is seen by having the local flag set),
     it is probably pointless to inline it unless the hardware is missing
     an indirect call predictor.  */
  if (!anticipate_inlining && e->inline_failed && !target->local.local)
    return false;
  /* For overwritable targets there is not much to do.  */
  if (e->inline_failed && !can_inline_edge_p (e, false, true))
    return false;
  /* OK, speculation seems interesting.  */
  return true;
}
/* We know that EDGE is not going to be inlined.
   See if we can remove speculation.  */

static void
resolve_noninline_speculation (edge_heap_t *edge_heap,
                               struct cgraph_edge *edge)
{
  if (edge->speculative && !speculation_useful_p (edge, false))
    {
      struct cgraph_node *node = edge->caller;
      struct cgraph_node *where = node->global.inlined_to
                                  ? node->global.inlined_to : node;
      bitmap updated_nodes = BITMAP_ALLOC (NULL);

      spec_rem += edge->count;
      edge->resolve_speculation ();
      reset_edge_caches (where);
      inline_update_overall_summary (where);
      update_caller_keys (edge_heap, where,
                          updated_nodes, NULL);
      update_callee_keys (edge_heap, where,
                          updated_nodes);
      BITMAP_FREE (updated_nodes);
    }
}
/* Return true if NODE should be accounted for the overall size estimate.
   Skip all nodes optimized for size so we can measure the growth of the hot
   part of the program regardless of the padding.  */

bool
inline_account_function_p (struct cgraph_node *node)
{
  return (!DECL_EXTERNAL (node->decl)
          && !opt_for_fn (node->decl, optimize_size)
          && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
}
1624 /* We use greedy algorithm for inlining of small functions:
1625 All inline candidates are put into prioritized heap ordered in
1626 increasing badness.
1628 The inlining of small functions is bounded by unit growth parameters. */
1630 static void
1631 inline_small_functions (void)
1633 struct cgraph_node *node;
1634 struct cgraph_edge *edge;
1635 edge_heap_t edge_heap (sreal::min ());
1636 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1637 int min_size, max_size;
1638 auto_vec<cgraph_edge *> new_indirect_edges;
1639 int initial_size = 0;
1640 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1641 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1642 new_indirect_edges.create (8);
1644 edge_removal_hook_holder
1645 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1647 /* Compute overall unit size and other global parameters used by badness
1648 metrics. */
1650 max_count = 0;
1651 ipa_reduced_postorder (order, true, true, NULL);
1652 free (order);
1654 FOR_EACH_DEFINED_FUNCTION (node)
1655 if (!node->global.inlined_to)
1657 if (!node->alias && node->analyzed
1658 && (node->has_gimple_body_p () || node->thunk.thunk_p))
1660 struct inline_summary *info = inline_summaries->get (node);
1661 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1663 /* Do not account external functions, they will be optimized out
1664 if not inlined. Also only count the non-cold portion of program. */
1665 if (inline_account_function_p (node))
1666 initial_size += info->size;
1667 info->growth = estimate_growth (node);
1668 if (dfs && dfs->next_cycle)
1670 struct cgraph_node *n2;
1671 int id = dfs->scc_no + 1;
1672 for (n2 = node; n2;
1673 n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle)
1675 struct inline_summary *info2 = inline_summaries->get (n2);
1676 if (info2->scc_no)
1677 break;
1678 info2->scc_no = id;
1683 for (edge = node->callers; edge; edge = edge->next_caller)
1684 if (max_count < edge->count)
1685 max_count = edge->count;
1687 ipa_free_postorder_info ();
1688 initialize_growth_caches ();
1690 if (dump_file)
1691 fprintf (dump_file,
1692 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1693 initial_size);
1695 overall_size = initial_size;
1696 max_size = compute_max_insns (overall_size);
1697 min_size = overall_size;
1699 /* Populate the heap with all edges we might inline. */
1701 FOR_EACH_DEFINED_FUNCTION (node)
1703 bool update = false;
1704 struct cgraph_edge *next;
1705 bool has_speculative = false;
1707 if (dump_file)
1708 fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
1709 node->name (), node->order);
1711 for (edge = node->callees; edge; edge = next)
1713 next = edge->next_callee;
1714 if (edge->inline_failed
1715 && !edge->aux
1716 && can_inline_edge_p (edge, true)
1717 && want_inline_small_function_p (edge, true)
1718 && edge->inline_failed)
1720 gcc_assert (!edge->aux);
1721 update_edge_key (&edge_heap, edge);
1723 if (edge->speculative)
1724 has_speculative = true;
1726 if (has_speculative)
1727 for (edge = node->callees; edge; edge = next)
1728 if (edge->speculative && !speculation_useful_p (edge,
1729 edge->aux != NULL))
1731 edge->resolve_speculation ();
1732 update = true;
1734 if (update)
1736 struct cgraph_node *where = node->global.inlined_to
1737 ? node->global.inlined_to : node;
1738 inline_update_overall_summary (where);
1739 reset_edge_caches (where);
1740 update_caller_keys (&edge_heap, where,
1741 updated_nodes, NULL);
1742 update_callee_keys (&edge_heap, where,
1743 updated_nodes);
1744 bitmap_clear (updated_nodes);
1748 gcc_assert (in_lto_p
1749 || !max_count
1750 || (profile_info && flag_branch_probabilities));
1752 while (!edge_heap.empty ())
1754 int old_size = overall_size;
1755 struct cgraph_node *where, *callee;
1756 sreal badness = edge_heap.min_key ();
1757 sreal current_badness;
1758 int growth;
1760 edge = edge_heap.extract_min ();
1761 gcc_assert (edge->aux);
1762 edge->aux = NULL;
1763 if (!edge->inline_failed || !edge->callee->analyzed)
1764 continue;
1766 #ifdef ENABLE_CHECKING
1767 /* Make sure that the caches are kept consistent.  */
1768 sreal cached_badness = edge_badness (edge, false);
1770 int old_size_est = estimate_edge_size (edge);
1771 int old_time_est = estimate_edge_time (edge);
1772 int old_hints_est = estimate_edge_hints (edge);
1774 reset_edge_growth_cache (edge);
1775 gcc_assert (old_size_est == estimate_edge_size (edge));
1776 gcc_assert (old_time_est == estimate_edge_time (edge));
1777 /* FIXME:
1779 gcc_assert (old_hints_est == estimate_edge_hints (edge));
1781 fails with profile feedback because some hints depend on the
1782 maybe_hot_edge_p predicate, and because the callee gets inlined into
1783 other calls, the edge may become cold.
1784 This ought to be fixed by computing relative probabilities
1785 for the given invocation, but that is better done once the whole
1786 code is converted to sreals.  Disable for now and revert to the "wrong"
1787 value so the enable/disable checking paths agree.  */
1788 edge_growth_cache[edge->uid].hints = old_hints_est + 1;
1790 /* When updating the edge costs, we only decrease badness in the keys.
1791 Increases of badness are handled lazily; when we see a key with an
1792 out-of-date value, we re-insert it now.  */
1793 current_badness = edge_badness (edge, false);
1794 /* Disable checking when a profile is used, because round-off errors
1795 may cause slight deviations in the order.  */
1796 gcc_assert (max_count || cached_badness == current_badness);
1797 gcc_assert (current_badness >= badness);
1798 #else
1799 current_badness = edge_badness (edge, false);
1800 #endif
1801 if (current_badness != badness)
1803 if (edge_heap.min () && current_badness > edge_heap.min_key ())
1805 edge->aux = edge_heap.insert (current_badness, edge);
1806 continue;
1808 else
1809 badness = current_badness;
1812 if (!can_inline_edge_p (edge, true))
1814 resolve_noninline_speculation (&edge_heap, edge);
1815 continue;
1818 callee = edge->callee->ultimate_alias_target ();
1819 growth = estimate_edge_growth (edge);
1820 if (dump_file)
1822 fprintf (dump_file,
1823 "\nConsidering %s/%i with %i size\n",
1824 callee->name (), callee->order,
1825 inline_summaries->get (callee)->size);
1826 fprintf (dump_file,
1827 " to be inlined into %s/%i in %s:%i\n"
1828 " Estimated badness is %f, frequency %.2f.\n",
1829 edge->caller->name (), edge->caller->order,
1830 edge->call_stmt
1831 && (LOCATION_LOCUS (gimple_location ((const_gimple)
1832 edge->call_stmt))
1833 > BUILTINS_LOCATION)
1834 ? gimple_filename ((const_gimple) edge->call_stmt)
1835 : "unknown",
1836 edge->call_stmt
1837 ? gimple_lineno ((const_gimple) edge->call_stmt)
1838 : -1,
1839 badness.to_double (),
1840 edge->frequency / (double)CGRAPH_FREQ_BASE);
1841 if (edge->count)
1842 fprintf (dump_file, " Called %"PRId64"x\n",
1843 edge->count);
1844 if (dump_flags & TDF_DETAILS)
1845 edge_badness (edge, true);
1848 if (overall_size + growth > max_size
1849 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1851 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1852 report_inline_failed_reason (edge);
1853 resolve_noninline_speculation (&edge_heap, edge);
1854 continue;
1857 if (!want_inline_small_function_p (edge, true))
1859 resolve_noninline_speculation (&edge_heap, edge);
1860 continue;
1863 /* Heuristics for inlining small functions work poorly for
1864 recursive calls, where the effect is similar to loop unrolling.
1865 When inlining such an edge seems profitable, leave the decision
1866 to the special-purpose recursive inliner.  */
1867 if (edge->recursive_p ())
1869 where = edge->caller;
1870 if (where->global.inlined_to)
1871 where = where->global.inlined_to;
1872 if (!recursive_inlining (edge,
1873 opt_for_fn (edge->caller->decl,
1874 flag_indirect_inlining)
1875 ? &new_indirect_edges : NULL))
1877 edge->inline_failed = CIF_RECURSIVE_INLINING;
1878 resolve_noninline_speculation (&edge_heap, edge);
1879 continue;
1881 reset_edge_caches (where);
1882 /* The recursive inliner inlines all recursive calls of the function
1883 at once.  Consequently, we need to update all callee keys.  */
1884 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
1885 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
1886 update_callee_keys (&edge_heap, where, updated_nodes);
1887 bitmap_clear (updated_nodes);
1889 else
1891 struct cgraph_node *outer_node = NULL;
1892 int depth = 0;
1894 /* Consider the case where a self-recursive function A is inlined
1895 into B.  This is a desired optimization in some cases, since it
1896 leads to an effect similar to loop peeling and we might completely
1897 optimize out the recursive call.  However, we must be extra
1898 selective.  */
1900 where = edge->caller;
1901 while (where->global.inlined_to)
1903 if (where->decl == callee->decl)
1904 outer_node = where, depth++;
1905 where = where->callers->caller;
1907 if (outer_node
1908 && !want_inline_self_recursive_call_p (edge, outer_node,
1909 true, depth))
1911 edge->inline_failed
1912 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
1913 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1914 resolve_noninline_speculation (&edge_heap, edge);
1915 continue;
1917 else if (depth && dump_file)
1918 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
1920 gcc_checking_assert (!callee->global.inlined_to);
1921 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
1922 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
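/* Edge growth caches around the inlined callee are stale now; reset
   them before recomputing the keys.  */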
1924 reset_edge_caches (edge->callee->function_symbol ());
1926 update_callee_keys (&edge_heap, where, updated_nodes);
1928 where = edge->caller;
1929 if (where->global.inlined_to)
1930 where = where->global.inlined_to;
1932 /* Our profitability metric can depend on local properties
1933 such as the number of inlinable calls and the size of the function body.
1934 After inlining, these properties might change for the function we
1935 inlined into (since its body size changed) and for the functions
1936 called by the function we inlined (since the number of their inlinable
1937 callers might change).  */
1938 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
1939 /* The offline copy count has possibly changed; recompute if a profile
1940 is available.  */
1941 if (max_count)
1943 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
1944 if (n != edge->callee && n->analyzed)
1945 update_callee_keys (&edge_heap, n, updated_nodes);
1947 bitmap_clear (updated_nodes);
1949 if (dump_file)
1951 fprintf (dump_file,
1952 " Inlined into %s which now has time %i and size %i,"
1953 "net change of %+i.\n",
1954 edge->caller->name (),
1955 inline_summaries->get (edge->caller)->time,
1956 inline_summaries->get (edge->caller)->size,
1957 overall_size - old_size);
1959 if (min_size > overall_size)
1961 min_size = overall_size;
1962 max_size = compute_max_insns (min_size);
1964 if (dump_file)
1965 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1969 free_growth_caches ();
1970 if (dump_file)
1971 fprintf (dump_file,
1972 "Unit growth for small function inlining: %i->%i (%i%%)\n",
1973 initial_size, overall_size,
1974 initial_size ? overall_size * 100 / initial_size - 100 : 0);
1975 BITMAP_FREE (updated_nodes);
1976 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
1979 /* Flatten NODE. Performed both during early inlining and
1980 at IPA inlining time. */
1982 static void
1983 flatten_function (struct cgraph_node *node, bool early)
1985 struct cgraph_edge *e;
1987 /* We shouldn't be called recursively when we are being processed. */
1988 gcc_assert (node->aux == NULL);
1990 node->aux = (void *) node;
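/* node->aux doubles as the visited marker while we walk the region
   being flattened; it is cleared again before we return.  */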
1992 for (e = node->callees; e; e = e->next_callee)
1994 struct cgraph_node *orig_callee;
1995 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
1997 /* Have we hit a cycle?  Then it is time to give up.  */
1998 if (callee->aux)
2000 if (dump_file)
2001 fprintf (dump_file,
2002 "Not inlining %s into %s to avoid cycle.\n",
2003 xstrdup_for_dump (callee->name ()),
2004 xstrdup_for_dump (e->caller->name ()));
2005 e->inline_failed = CIF_RECURSIVE_INLINING;
2006 continue;
2009 /* When the edge is already inlined, we just need to recurse into
2010 it in order to fully flatten the leaves. */
2011 if (!e->inline_failed)
2013 flatten_function (callee, early);
2014 continue;
2017 /* The flatten attribute needs to be processed during late inlining.
2018 For extra code quality, however, we also do flattening during early
2019 optimization.  */
2020 if (!early
2021 ? !can_inline_edge_p (e, true)
2022 : !can_early_inline_edge_p (e))
2023 continue;
2025 if (e->recursive_p ())
2027 if (dump_file)
2028 fprintf (dump_file, "Not inlining: recursive call.\n");
2029 continue;
2032 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2033 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2035 if (dump_file)
2036 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
2037 continue;
2040 /* Inline the edge and flatten the inline clone. Avoid
2041 recursing through the original node if the node was cloned. */
2042 if (dump_file)
2043 fprintf (dump_file, " Inlining %s into %s.\n",
2044 xstrdup_for_dump (callee->name ()),
2045 xstrdup_for_dump (e->caller->name ()));
2046 orig_callee = callee;
2047 inline_call (e, true, NULL, NULL, false);
2048 if (e->callee != orig_callee)
2049 orig_callee->aux = (void *) node;
2050 flatten_function (e->callee, early);
2051 if (e->callee != orig_callee)
2052 orig_callee->aux = NULL;
2055 node->aux = NULL;
2056 if (!node->global.inlined_to)
2057 inline_update_overall_summary (node);
2060 /* Count the number of callers of NODE and store it into DATA (which
2061 points to an int).  Worker for cgraph_for_node_and_aliases.  */
2063 static bool
2064 sum_callers (struct cgraph_node *node, void *data)
2066 struct cgraph_edge *e;
2067 int *num_calls = (int *)data;
2069 for (e = node->callers; e; e = e->next_caller)
2070 (*num_calls)++;
2071 return false;
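/* Typical use (see ipa_inline below):
     int num_calls = 0;
     node->call_for_symbol_thunks_and_aliases (sum_callers, &num_calls, true);  */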
2074 /* Inline NODE to all callers.  Worker for cgraph_for_node_and_aliases.
2075 DATA points to the number of calls originally found, so we avoid
2076 infinite recursion.  */
2078 static bool
2079 inline_to_all_callers (struct cgraph_node *node, void *data)
2081 int *num_calls = (int *)data;
2082 bool callee_removed = false;
2084 while (node->callers && !node->global.inlined_to)
2086 struct cgraph_node *caller = node->callers->caller;
2088 if (dump_file)
2090 fprintf (dump_file,
2091 "\nInlining %s size %i.\n",
2092 node->name (),
2093 inline_summaries->get (node)->size);
2094 fprintf (dump_file,
2095 " Called once from %s %i insns.\n",
2096 node->callers->caller->name (),
2097 inline_summaries->get (node->callers->caller)->size);
2100 inline_call (node->callers, true, NULL, NULL, true, &callee_removed);
2101 if (dump_file)
2102 fprintf (dump_file,
2103 " Inlined into %s which now has %i size\n",
2104 caller->name (),
2105 inline_summaries->get (caller)->size);
2106 if (!(*num_calls)--)
2108 if (dump_file)
2109 fprintf (dump_file, "New calls found; giving up.\n");
2110 return callee_removed;
2112 if (callee_removed)
2113 return true;
2115 return false;
2118 /* Output overall time estimate. */
2119 static void
2120 dump_overall_stats (void)
2122 int64_t sum_weighted = 0, sum = 0;
2123 struct cgraph_node *node;
2125 FOR_EACH_DEFINED_FUNCTION (node)
2126 if (!node->global.inlined_to
2127 && !node->alias)
2129 int time = inline_summaries->get (node)->time;
2130 sum += time;
2131 sum_weighted += time * node->count;
2133 fprintf (dump_file, "Overall time estimate: "
2134 "%"PRId64" weighted by profile: "
2135 "%"PRId64"\n", sum, sum_weighted);
2138 /* Output some useful stats about inlining. */
2140 static void
2141 dump_inline_stats (void)
2143 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2144 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2145 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2146 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2147 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2148 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
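/* Per-failure-reason accumulators: [0] is the profile count, [1] the
   frequency and [2] the number of calls, as filled in below.  */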
2149 int64_t reason[CIF_N_REASONS][3];
2150 int i;
2151 struct cgraph_node *node;
2153 memset (reason, 0, sizeof (reason));
2154 FOR_EACH_DEFINED_FUNCTION (node)
2156 struct cgraph_edge *e;
2157 for (e = node->callees; e; e = e->next_callee)
2159 if (e->inline_failed)
2161 reason[(int) e->inline_failed][0] += e->count;
2162 reason[(int) e->inline_failed][1] += e->frequency;
2163 reason[(int) e->inline_failed][2] ++;
2164 if (DECL_VIRTUAL_P (e->callee->decl))
2166 if (e->indirect_inlining_edge)
2167 noninlined_virt_indir_cnt += e->count;
2168 else
2169 noninlined_virt_cnt += e->count;
2171 else
2173 if (e->indirect_inlining_edge)
2174 noninlined_indir_cnt += e->count;
2175 else
2176 noninlined_cnt += e->count;
2179 else
2181 if (e->speculative)
2183 if (DECL_VIRTUAL_P (e->callee->decl))
2184 inlined_speculative_ply += e->count;
2185 else
2186 inlined_speculative += e->count;
2188 else if (DECL_VIRTUAL_P (e->callee->decl))
2190 if (e->indirect_inlining_edge)
2191 inlined_virt_indir_cnt += e->count;
2192 else
2193 inlined_virt_cnt += e->count;
2195 else
2197 if (e->indirect_inlining_edge)
2198 inlined_indir_cnt += e->count;
2199 else
2200 inlined_cnt += e->count;
2204 for (e = node->indirect_calls; e; e = e->next_callee)
2205 if (e->indirect_info->polymorphic)
2206 indirect_poly_cnt += e->count;
2207 else
2208 indirect_cnt += e->count;
2210 if (max_count)
2212 fprintf (dump_file,
2213 "Inlined %"PRId64 " + speculative "
2214 "%"PRId64 " + speculative polymorphic "
2215 "%"PRId64 " + previously indirect "
2216 "%"PRId64 " + virtual "
2217 "%"PRId64 " + virtual and previously indirect "
2218 "%"PRId64 "\n" "Not inlined "
2219 "%"PRId64 " + previously indirect "
2220 "%"PRId64 " + virtual "
2221 "%"PRId64 " + virtual and previously indirect "
2222 "%"PRId64 " + stil indirect "
2223 "%"PRId64 " + still indirect polymorphic "
2224 "%"PRId64 "\n", inlined_cnt,
2225 inlined_speculative, inlined_speculative_ply,
2226 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2227 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2228 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2229 fprintf (dump_file,
2230 "Removed speculations %"PRId64 "\n",
2231 spec_rem);
2233 dump_overall_stats ();
2234 fprintf (dump_file, "\nWhy inlining failed:\n");
2235 for (i = 0; i < CIF_N_REASONS; i++)
2236 if (reason[i][2])
2237 fprintf (dump_file, "%-50s: %8i calls, %8i freq, %"PRId64" count\n",
2238 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2239 (int) reason[i][2], (int) reason[i][1], reason[i][0]);
2242 /* Decide on the inlining.  We do so in topological order to avoid
2243 the expense of updating data structures.  */
2245 static unsigned int
2246 ipa_inline (void)
2248 struct cgraph_node *node;
2249 int nnodes;
2250 struct cgraph_node **order;
2251 int i;
2252 int cold;
2253 bool remove_functions = false;
2255 if (!optimize)
2256 return 0;
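/* Precompute reciprocal constants used by the badness metrics.  */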
2258 cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
2259 percent_rec = (sreal) 1 / (sreal) 100;
2261 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2263 if (in_lto_p && optimize)
2264 ipa_update_after_lto_read ();
2266 if (dump_file)
2267 dump_inline_summaries (dump_file);
2269 nnodes = ipa_reverse_postorder (order);
2271 FOR_EACH_FUNCTION (node)
2272 node->aux = 0;
2274 if (dump_file)
2275 fprintf (dump_file, "\nFlattening functions:\n");
2277 /* In the first pass, handle functions to be flattened.  Do this with
2278 priority so that none of our later choices will make it impossible.  */
2279 for (i = nnodes - 1; i >= 0; i--)
2281 node = order[i];
2283 /* Handle nodes to be flattened.
2284 Ideally, when processing callees we would stop inlining at the
2285 entry of cycles, possibly cloning that entry point and trying
2286 to flatten it, turning it into a self-recursive
2287 function.  */
2288 if (lookup_attribute ("flatten",
2289 DECL_ATTRIBUTES (node->decl)) != NULL)
2291 if (dump_file)
2292 fprintf (dump_file,
2293 "Flattening %s\n", node->name ());
2294 flatten_function (node, false);
2297 if (dump_file)
2298 dump_overall_stats ();
2300 inline_small_functions ();
2302 gcc_assert (symtab->state == IPA_SSA);
2303 symtab->state = IPA_SSA_AFTER_INLINING;
2304 /* Do the first after-inlining removal.  We want to remove all "stale"
2305 extern inline functions and virtual functions, so we really know what
2306 is called once.  */
2307 symtab->remove_unreachable_nodes (dump_file);
2308 free (order);
2310 /* Inline functions with the property that after inlining into all callers
2311 the code size will shrink, because the out-of-line copy is eliminated.
2312 We do this regardless of the callee size, as long as function growth
2313 limits are met.  */
2314 if (dump_file)
2315 fprintf (dump_file,
2316 "\nDeciding on functions to be inlined into all callers and "
2317 "removing useless speculations:\n");
2319 /* Inlining one function called once has a good chance of preventing
2320 inlining of other functions into the same callee.  Ideally we should
2321 work in priority order, but inlining hot functions first is probably
2322 a good cut without the extra pain of maintaining the queue.
2324 ??? This is not really fitting the bill perfectly: inlining a function
2325 into a callee often leads to better optimization of the callee due to
2326 the increased context for optimization.
2327 For example, if the main() function calls a function that outputs help
2328 and then a function that does the main optimization, we should inline
2329 the second with priority even if both calls are cold by themselves.
2331 We probably want to implement a new predicate replacing our use of
2332 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2333 to be hot.  */
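/* Run the discovery below twice: the first pass considers only hot
   functions, the second also the cold ones.  */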
2334 for (cold = 0; cold <= 1; cold++)
2336 FOR_EACH_DEFINED_FUNCTION (node)
2338 struct cgraph_edge *edge, *next;
2339 bool update = false;
2341 for (edge = node->callees; edge; edge = next)
2343 next = edge->next_callee;
2344 if (edge->speculative && !speculation_useful_p (edge, false))
2346 edge->resolve_speculation ();
2347 spec_rem += edge->count;
2348 update = true;
2349 remove_functions = true;
2352 if (update)
2354 struct cgraph_node *where = node->global.inlined_to
2355 ? node->global.inlined_to : node;
2356 reset_edge_caches (where);
2357 inline_update_overall_summary (where);
2359 if (want_inline_function_to_all_callers_p (node, cold))
2361 int num_calls = 0;
2362 node->call_for_symbol_thunks_and_aliases (sum_callers, &num_calls,
2363 true);
2364 while (node->call_for_symbol_thunks_and_aliases
2365 (inline_to_all_callers, &num_calls, true))
2367 remove_functions = true;
2372 /* Free ipa-prop structures if they are no longer needed. */
2373 if (optimize)
2374 ipa_free_all_structures_after_iinln ();
2376 if (dump_file)
2378 fprintf (dump_file,
2379 "\nInlined %i calls, eliminated %i functions\n\n",
2380 ncalls_inlined, nfunctions_inlined);
2381 dump_inline_stats ();
2384 if (dump_file)
2385 dump_inline_summaries (dump_file);
2386 /* In WPA we use inline summaries for partitioning process. */
2387 if (!flag_wpa)
2388 inline_free_summary ();
2389 return remove_functions ? TODO_remove_functions : 0;
2392 /* Inline always-inline function calls in NODE. */
2394 static bool
2395 inline_always_inline_functions (struct cgraph_node *node)
2397 struct cgraph_edge *e;
2398 bool inlined = false;
2400 for (e = node->callees; e; e = e->next_callee)
2402 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2403 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2404 continue;
2406 if (e->recursive_p ())
2408 if (dump_file)
2409 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2410 e->callee->name ());
2411 e->inline_failed = CIF_RECURSIVE_INLINING;
2412 continue;
2415 if (!can_early_inline_edge_p (e))
2417 /* Set inlined to true if the callee is marked "always_inline" but
2418 is not inlinable. This will allow flagging an error later in
2419 expand_call_inline in tree-inline.c. */
2420 if (lookup_attribute ("always_inline",
2421 DECL_ATTRIBUTES (callee->decl)) != NULL)
2422 inlined = true;
2423 continue;
2426 if (dump_file)
2427 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2428 xstrdup_for_dump (e->callee->name ()),
2429 xstrdup_for_dump (e->caller->name ()));
2430 inline_call (e, true, NULL, NULL, false);
2431 inlined = true;
2433 if (inlined)
2434 inline_update_overall_summary (node);
2436 return inlined;
2439 /* Decide on early inlining of small functions called by NODE.
2440 Iterate over its callees and inline the profitable candidates.  */
2442 static bool
2443 early_inline_small_functions (struct cgraph_node *node)
2445 struct cgraph_edge *e;
2446 bool inlined = false;
2448 for (e = node->callees; e; e = e->next_callee)
2450 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2451 if (!inline_summaries->get (callee)->inlinable
2452 || !e->inline_failed)
2453 continue;
2455 /* Do not consider functions not declared inline, unless inlining of such functions was requested by -finline-small-functions or -finline-functions.  */
2456 if (!DECL_DECLARED_INLINE_P (callee->decl)
2457 && !opt_for_fn (node->decl, flag_inline_small_functions)
2458 && !opt_for_fn (node->decl, flag_inline_functions))
2459 continue;
2461 if (dump_file)
2462 fprintf (dump_file, "Considering inline candidate %s.\n",
2463 callee->name ());
2465 if (!can_early_inline_edge_p (e))
2466 continue;
2468 if (e->recursive_p ())
2470 if (dump_file)
2471 fprintf (dump_file, " Not inlining: recursive call.\n");
2472 continue;
2475 if (!want_early_inline_function_p (e))
2476 continue;
2478 if (dump_file)
2479 fprintf (dump_file, " Inlining %s into %s.\n",
2480 xstrdup_for_dump (callee->name ()),
2481 xstrdup_for_dump (e->caller->name ()));
2482 inline_call (e, true, NULL, NULL, true);
2483 inlined = true;
2486 return inlined;
2489 unsigned int
2490 early_inliner (function *fun)
2492 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2493 struct cgraph_edge *edge;
2494 unsigned int todo = 0;
2495 int iterations = 0;
2496 bool inlined = false;
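/* Do nothing when errors were already reported.  */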
2498 if (seen_error ())
2499 return 0;
2501 /* Do nothing if data structures for the ipa-inliner are already computed.
2502 This happens when some pass decides to construct a new function and
2503 cgraph_add_new_function calls lowering passes and early optimization on
2504 it.  This may confuse us when the early inliner decides to inline a call
2505 to a function clone, because function clones don't have a parameter list
2506 in ipa-prop matching their signature.  */
2507 if (ipa_node_params_sum)
2508 return 0;
2510 #ifdef ENABLE_CHECKING
2511 node->verify ();
2512 #endif
2513 node->remove_all_references ();
2515 /* Rebuild this reference because it doesn't depend on the
2516 function's body and is required to pass cgraph_node
2517 verification.  */
2518 if (node->instrumented_version
2519 && !node->instrumentation_clone)
2520 node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
2522 /* Even when not optimizing or not inlining, inline always-inline
2523 functions.  */
2524 inlined = inline_always_inline_functions (node);
2526 if (!optimize
2527 || flag_no_inline
2528 || !flag_early_inlining
2529 /* Never inline regular functions into always-inline functions
2530 during incremental inlining.  This sucks, as functions calling
2531 always-inline functions will get less optimized, but at the
2532 same time inlining functions that call an always-inline
2533 function into an always-inline function might introduce
2534 cycles of edges to be always inlined in the callgraph.
2536 We might want to be smarter and just avoid this type of inlining.  */
2537 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2538 && lookup_attribute ("always_inline",
2539 DECL_ATTRIBUTES (node->decl))))
2541 else if (lookup_attribute ("flatten",
2542 DECL_ATTRIBUTES (node->decl)) != NULL)
2544 /* When the function is marked to be flattened, recursively inline
2545 all calls in it. */
2546 if (dump_file)
2547 fprintf (dump_file,
2548 "Flattening %s\n", node->name ());
2549 flatten_function (node, true);
2550 inlined = true;
2552 else
2554 /* If some always_inline functions were inlined, apply the changes.
2555 This way we will not account always-inlines into growth limits and,
2556 moreover, we will inline calls from always-inlines that we skipped
2557 previously because of the conditional above.  */
2558 if (inlined)
2560 timevar_push (TV_INTEGRATION);
2561 todo |= optimize_inline_calls (current_function_decl);
2562 inline_update_overall_summary (node);
2563 inlined = false;
2564 timevar_pop (TV_INTEGRATION);
2566 /* We iterate incremental inlining to get trivial cases of indirect
2567 inlining. */
2568 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2569 && early_inline_small_functions (node))
2571 timevar_push (TV_INTEGRATION);
2572 todo |= optimize_inline_calls (current_function_decl);
2574 /* Technically we ought to recompute inline parameters so the new
2575 iteration of the early inliner works as expected.  However, the
2576 values are approximately right, and thus we only need to update edge
2577 info that might be cleared out for newly discovered edges.  */
2578 for (edge = node->callees; edge; edge = edge->next_callee)
2580 /* We have no summary for new bound store calls yet. */
2581 if (inline_edge_summary_vec.length () > (unsigned)edge->uid)
2583 struct inline_edge_summary *es = inline_edge_summary (edge);
2584 es->call_stmt_size
2585 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2586 es->call_stmt_time
2587 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
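/* Inlining may have redirected the call; if the call statement no
   longer matches the callee's declared type, the edge can no longer
   be inlined.  */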
2589 if (edge->callee->decl
2590 && !gimple_check_call_matching_types (
2591 edge->call_stmt, edge->callee->decl, false))
2592 edge->call_stmt_cannot_inline_p = true;
2594 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2595 inline_update_overall_summary (node);
2596 timevar_pop (TV_INTEGRATION);
2597 iterations++;
2598 inlined = false;
2600 if (dump_file)
2601 fprintf (dump_file, "Iterations: %i\n", iterations);
2604 if (inlined)
2606 timevar_push (TV_INTEGRATION);
2607 todo |= optimize_inline_calls (current_function_decl);
2608 timevar_pop (TV_INTEGRATION);
2611 fun->always_inline_functions_inlined = true;
2613 return todo;
2616 /* Do inlining of small functions.  Doing so early helps profiling and
2617 other passes be somewhat more effective and avoids some code duplication
2618 in the later real inlining pass for testcases with very many function calls.  */
2620 namespace {
2622 const pass_data pass_data_early_inline =
2624 GIMPLE_PASS, /* type */
2625 "einline", /* name */
2626 OPTGROUP_INLINE, /* optinfo_flags */
2627 TV_EARLY_INLINING, /* tv_id */
2628 PROP_ssa, /* properties_required */
2629 0, /* properties_provided */
2630 0, /* properties_destroyed */
2631 0, /* todo_flags_start */
2632 0, /* todo_flags_finish */
2635 class pass_early_inline : public gimple_opt_pass
2637 public:
2638 pass_early_inline (gcc::context *ctxt)
2639 : gimple_opt_pass (pass_data_early_inline, ctxt)
2642 /* opt_pass methods: */
2643 virtual unsigned int execute (function *);
2645 }; // class pass_early_inline
2647 unsigned int
2648 pass_early_inline::execute (function *fun)
2650 return early_inliner (fun);
2653 } // anon namespace
2655 gimple_opt_pass *
2656 make_pass_early_inline (gcc::context *ctxt)
2658 return new pass_early_inline (ctxt);
2661 namespace {
2663 const pass_data pass_data_ipa_inline =
2665 IPA_PASS, /* type */
2666 "inline", /* name */
2667 OPTGROUP_INLINE, /* optinfo_flags */
2668 TV_IPA_INLINING, /* tv_id */
2669 0, /* properties_required */
2670 0, /* properties_provided */
2671 0, /* properties_destroyed */
2672 0, /* todo_flags_start */
2673 ( TODO_dump_symtab ), /* todo_flags_finish */
2676 class pass_ipa_inline : public ipa_opt_pass_d
2678 public:
2679 pass_ipa_inline (gcc::context *ctxt)
2680 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2681 inline_generate_summary, /* generate_summary */
2682 inline_write_summary, /* write_summary */
2683 inline_read_summary, /* read_summary */
2684 NULL, /* write_optimization_summary */
2685 NULL, /* read_optimization_summary */
2686 NULL, /* stmt_fixup */
2687 0, /* function_transform_todo_flags_start */
2688 inline_transform, /* function_transform */
2689 NULL) /* variable_transform */
2692 /* opt_pass methods: */
2693 virtual unsigned int execute (function *) { return ipa_inline (); }
2695 }; // class pass_ipa_inline
2697 } // anon namespace
2699 ipa_opt_pass_d *
2700 make_pass_ipa_inline (gcc::context *ctxt)
2702 return new pass_ipa_inline (ctxt);