1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2014 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Inlining decision heuristics
23 The implementation of the inliner is organized as follows:
25 inlining heuristics limits
27 can_inline_edge_p allows checking that a particular inlining is allowed
28 by the limits specified by the user (allowed function growth, stack
29 growth and so on).
31 Functions are inlined when it is obvious the result is profitable (such
32 as functions called once or when inlining reduces code size).
33 In addition to that we perform inlining of small functions and recursive
34 inlining.
36 inlining heuristics
38 The inliner itself is split into two passes:
40 pass_early_inlining
42 Simple local inlining pass inlining callees into the current function.
43 This pass makes no use of whole unit analysis and thus it can make only
44 very simple decisions based on local properties.
46 The strength of the pass is that it is run in topological order
47 (reverse postorder) on the callgraph. Functions are converted into SSA
48 form just before this pass and optimized subsequently. As a result, the
49 callees of the function seen by the early inliner have already been
50 optimized, and the results of early inlining add a lot of optimization
51 opportunities for local optimization.
53 The pass handles the obvious inlining decisions within the compilation
54 unit - inlining auto inline functions, inlining for size and
55 flattening.
57 The main strength of the pass is its ability to eliminate the abstraction
58 penalty in C++ code (via a combination of inlining and early
59 optimization) and thus improve the quality of analysis done by the real
60 IPA optimizers.
62 Because of the lack of whole unit knowledge, the pass cannot really make
63 good code size/performance tradeoffs. It does, however, perform very
64 simple speculative inlining, allowing code size to grow by
65 EARLY_INLINING_INSNS when the callee is a leaf function. In this case
66 the optimizations performed later are very likely to eliminate the cost.
68 pass_ipa_inline
70 This is the real inliner able to handle inlining with whole program
71 knowledge. It performs the following steps:
73 1) inlining of small functions. This is implemented by a greedy
74 algorithm ordering all inlinable cgraph edges by their badness and
75 inlining them in this order as long as the inline limits allow doing so.
77 This heuristic is not very good at inlining recursive calls. Recursive
78 calls can be inlined with results similar to loop unrolling. To do so,
79 a special purpose recursive inliner is executed on the function when a
80 recursive edge is met as a viable candidate.
82 2) Unreachable functions are removed from the callgraph. Inlining leads
83 to devirtualization and other modifications of the callgraph, so functions
84 may become unreachable during the process. Also functions declared as
85 extern inline or virtual functions are removed, since after inlining
86 we no longer need the offline bodies.
88 3) Functions called once and not exported from the unit are inlined.
89 This should almost always lead to a reduction of code size by eliminating
90 the need for an offline copy of the function. */
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "tm.h"
96 #include "tree.h"
97 #include "trans-mem.h"
98 #include "calls.h"
99 #include "tree-inline.h"
100 #include "langhooks.h"
101 #include "flags.h"
102 #include "diagnostic.h"
103 #include "gimple-pretty-print.h"
104 #include "params.h"
105 #include "fibheap.h"
106 #include "intl.h"
107 #include "tree-pass.h"
108 #include "coverage.h"
109 #include "rtl.h"
110 #include "bitmap.h"
111 #include "profile.h"
112 #include "basic-block.h"
113 #include "tree-ssa-alias.h"
114 #include "internal-fn.h"
115 #include "gimple-expr.h"
116 #include "is-a.h"
117 #include "gimple.h"
118 #include "gimple-ssa.h"
119 #include "ipa-prop.h"
120 #include "except.h"
121 #include "target.h"
122 #include "ipa-inline.h"
123 #include "ipa-utils.h"
124 #include "sreal.h"
125 #include "auto-profile.h"
126 #include "cilk.h"
127 #include "builtins.h"
129 /* Statistics we collect about inlining algorithm. */
130 static int overall_size;
131 static gcov_type max_count;
132 static sreal max_count_real, max_relbenefit_real, half_int_min_real;
133 static gcov_type spec_rem;
135 /* Return false when inlining edge E would lead to violating
136 limits on function unit growth or stack usage growth.
138 The relative function body growth limit is present generally
139 to avoid problems with non-linear behavior of the compiler.
140 To allow inlining huge functions into a tiny wrapper, the limit
141 is always based on the bigger of the two functions considered.
143 For stack growth limits we always base the growth on the stack usage
144 of the callers. We want to prevent applications from segfaulting
145 on stack overflow when functions with huge stack frames get
146 inlined. */
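/* Illustrative example (the parameter values here are hypothetical, not
   necessarily the built-in defaults): if the largest body on the chain of
   inlined-to callers has self_size 1000 and --param large-function-growth
   is 100, the size limit becomes 1000 + 1000 * 100 / 100 = 2000
   instructions; an inlining whose estimated result exceeds this limit and
   also --param large-function-insns is rejected.  Similarly, with an
   estimated caller stack frame of 256 bytes and --param stack-frame-growth
   of 1000, stack_size_limit becomes 256 + 256 * 1000 / 100 = 2816 bytes.  */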
148 static bool
149 caller_growth_limits (struct cgraph_edge *e)
151 struct cgraph_node *to = e->caller;
152 struct cgraph_node *what = e->callee->ultimate_alias_target ();
153 int newsize;
154 int limit = 0;
155 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
156 struct inline_summary *info, *what_info, *outer_info = inline_summary (to);
158 /* Look for the function e->caller is inlined into. While doing
159 so, work out the largest function body on the way. As
160 described above, we want to base our function growth
161 limits on that, not on the self size of the
162 outer function, nor on the self size of the inline code
163 we immediately inline into. This is the most relaxed
164 interpretation of the rule "do not grow large functions
165 too much in order to prevent the compiler from exploding". */
166 while (true)
168 info = inline_summary (to);
169 if (limit < info->self_size)
170 limit = info->self_size;
171 if (stack_size_limit < info->estimated_self_stack_size)
172 stack_size_limit = info->estimated_self_stack_size;
173 if (to->global.inlined_to)
174 to = to->callers->caller;
175 else
176 break;
179 what_info = inline_summary (what);
181 if (limit < what_info->self_size)
182 limit = what_info->self_size;
184 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
186 /* Check the size after inlining against the function limits. But allow
187 the function to shrink if it went over the limits by forced inlining. */
188 newsize = estimate_size_after_inlining (to, e);
189 if (newsize >= info->size
190 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
191 && newsize > limit)
193 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
194 return false;
197 if (!what_info->estimated_stack_size)
198 return true;
200 /* FIXME: The stack size limit often prevents inlining in Fortran programs
201 due to large I/O data structures used by the Fortran front-end.
202 We ought to ignore this limit when we know that the edge is executed
203 on every invocation of the caller (i.e. its call statement dominates the
204 exit block). We do not track this information, yet. */
205 stack_size_limit += ((gcov_type)stack_size_limit
206 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
208 inlined_stack = (outer_info->stack_frame_offset
209 + outer_info->estimated_self_stack_size
210 + what_info->estimated_stack_size);
211 /* Check the new stack consumption against the stack consumption at the
212 place the stack is used. */
213 if (inlined_stack > stack_size_limit
214 /* If the function already has large stack usage from a sibling
215 inline call, we can inline, too.
216 This bit overoptimistically assumes that we are good at stack
217 packing. */
218 && inlined_stack > info->estimated_stack_size
219 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
221 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
222 return false;
224 return true;
227 /* Dump info about why inlining has failed. */
229 static void
230 report_inline_failed_reason (struct cgraph_edge *e)
232 if (dump_file)
234 fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
235 xstrdup (e->caller->name ()), e->caller->order,
236 xstrdup (e->callee->name ()), e->callee->order,
237 cgraph_inline_failed_string (e->inline_failed));
241 /* Decide whether sanitizer-related attributes allow inlining. */
243 static bool
244 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
246 /* Don't care if the sanitizer is disabled. */
247 if (!(flag_sanitize & SANITIZE_ADDRESS))
248 return true;
250 if (!caller || !callee)
251 return true;
253 return !!lookup_attribute ("no_sanitize_address",
254 DECL_ATTRIBUTES (caller)) ==
255 !!lookup_attribute ("no_sanitize_address",
256 DECL_ATTRIBUTES (callee));
259 /* Decide if we can inline the edge and possibly update
260 inline_failed reason.
261 We check whether inlining is possible at all and whether
262 caller growth limits allow doing so.
264 If REPORT is true, output the reason to the dump file.
266 If DISREGARD_LIMITS is true, ignore size limits. */
268 static bool
269 can_inline_edge_p (struct cgraph_edge *e, bool report,
270 bool disregard_limits = false)
272 bool inlinable = true;
273 enum availability avail;
274 cgraph_node *callee = e->callee->ultimate_alias_target (&avail);
275 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
276 tree callee_tree
277 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
278 struct function *caller_fun = e->caller->get_fun ();
279 struct function *callee_fun = callee ? callee->get_fun () : NULL;
281 gcc_assert (e->inline_failed);
283 if (!callee || !callee->definition)
285 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
286 inlinable = false;
288 else if (callee->calls_comdat_local)
290 e->inline_failed = CIF_USES_COMDAT_LOCAL;
291 inlinable = false;
293 else if (!inline_summary (callee)->inlinable
294 || (caller_fun && fn_contains_cilk_spawn_p (caller_fun)))
296 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
297 inlinable = false;
299 else if (avail <= AVAIL_INTERPOSABLE)
301 e->inline_failed = CIF_OVERWRITABLE;
302 inlinable = false;
304 else if (e->call_stmt_cannot_inline_p)
306 if (e->inline_failed != CIF_FUNCTION_NOT_OPTIMIZED)
307 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
308 inlinable = false;
310 /* Don't inline if the functions have different EH personalities. */
311 else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
312 && DECL_FUNCTION_PERSONALITY (callee->decl)
313 && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
314 != DECL_FUNCTION_PERSONALITY (callee->decl)))
316 e->inline_failed = CIF_EH_PERSONALITY;
317 inlinable = false;
319 /* TM pure functions should not be inlined into non-TM_pure
320 functions. */
321 else if (is_tm_pure (callee->decl)
322 && !is_tm_pure (e->caller->decl))
324 e->inline_failed = CIF_UNSPECIFIED;
325 inlinable = false;
327 /* Don't inline if the callee can throw non-call exceptions but the
328 caller cannot.
329 FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
330 Move the flag into cgraph node or mirror it in the inline summary. */
331 else if (callee_fun && callee_fun->can_throw_non_call_exceptions
332 && !(caller_fun && caller_fun->can_throw_non_call_exceptions))
334 e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
335 inlinable = false;
337 /* Check compatibility of target optimization options. */
338 else if (!targetm.target_option.can_inline_p (e->caller->decl,
339 callee->decl))
341 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
342 inlinable = false;
344 /* Don't inline a function with mismatched sanitization attributes. */
345 else if (!sanitize_attrs_match_for_inline_p (e->caller->decl, callee->decl))
347 e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
348 inlinable = false;
350 /* Check if caller growth allows the inlining. */
351 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
352 && !disregard_limits
353 && !lookup_attribute ("flatten",
354 DECL_ATTRIBUTES
355 (e->caller->global.inlined_to
356 ? e->caller->global.inlined_to->decl
357 : e->caller->decl))
358 && !caller_growth_limits (e))
359 inlinable = false;
360 /* Don't inline a function with a higher optimization level than the
361 caller. FIXME: this is really just the tip of the iceberg of handling
362 the optimization attribute. */
363 else if (caller_tree != callee_tree)
365 struct cl_optimization *caller_opt
366 = TREE_OPTIMIZATION ((caller_tree)
367 ? caller_tree
368 : optimization_default_node);
370 struct cl_optimization *callee_opt
371 = TREE_OPTIMIZATION ((callee_tree)
372 ? callee_tree
373 : optimization_default_node);
375 if (((caller_opt->x_optimize > callee_opt->x_optimize)
376 || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
377 /* gcc.dg/pr43564.c. Look at forced inline even in -O0. */
378 && !DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
380 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
381 inlinable = false;
385 if (!inlinable && report)
386 report_inline_failed_reason (e);
387 return inlinable;
391 /* Return true if the edge E is inlinable during early inlining. */
393 static bool
394 can_early_inline_edge_p (struct cgraph_edge *e)
396 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
397 /* The early inliner might get called at WPA stage when an IPA pass adds a
398 new function. In this case we cannot really do any early inlining
399 because function bodies are missing. */
400 if (!gimple_has_body_p (callee->decl))
402 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
403 return false;
405 /* In the early inliner some of the callees may not be in SSA form yet
406 (i.e. the callgraph is cyclic and we did not process
407 the callee by the early inliner yet). We don't have a CIF code for this
408 case; later we will re-do the decision in the real inliner. */
409 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
410 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
412 if (dump_file)
413 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
414 return false;
416 if (!can_inline_edge_p (e, true))
417 return false;
418 return true;
422 /* Return number of calls in N. Ignore cheap builtins. */
424 static int
425 num_calls (struct cgraph_node *n)
427 struct cgraph_edge *e;
428 int num = 0;
430 for (e = n->callees; e; e = e->next_callee)
431 if (!is_inexpensive_builtin (e->callee->decl))
432 num++;
433 return num;
437 /* Return true if we are interested in inlining a small function. */
439 static bool
440 want_early_inline_function_p (struct cgraph_edge *e)
442 bool want_inline = true;
443 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
445 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
447 /* For AutoFDO, we need to make sure that before profile annotation, all
448 hot paths' IR look exactly the same as in the profiled binary. As a result,
449 in the early inliner, we will disregard the size limit and inline those
450 callsites that are:
451 * inlined in the profiled binary, and
452 * the cloned callee has enough samples to be considered "hot". */
453 else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
455 else if (!DECL_DECLARED_INLINE_P (callee->decl)
456 && !flag_inline_small_functions)
458 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
459 report_inline_failed_reason (e);
460 want_inline = false;
462 else
464 int growth = estimate_edge_growth (e);
465 int n;
467 if (growth <= 0)
469 else if (!e->maybe_hot_p ()
470 && growth > 0)
472 if (dump_file)
473 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
474 "call is cold and code would grow by %i\n",
475 xstrdup (e->caller->name ()),
476 e->caller->order,
477 xstrdup (callee->name ()), callee->order,
478 growth);
479 want_inline = false;
481 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
483 if (dump_file)
484 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
485 "growth %i exceeds --param early-inlining-insns\n",
486 xstrdup (e->caller->name ()),
487 e->caller->order,
488 xstrdup (callee->name ()), callee->order,
489 growth);
490 want_inline = false;
492 else if ((n = num_calls (callee)) != 0
493 && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
495 if (dump_file)
496 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
497 "growth %i exceeds --param early-inlining-insns "
498 "divided by number of calls\n",
499 xstrdup (e->caller->name ()),
500 e->caller->order,
501 xstrdup (callee->name ()), callee->order,
502 growth);
503 want_inline = false;
506 return want_inline;
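/* A worked example of the growth checks above (numbers are illustrative
   only): with --param early-inlining-insns set to 14, a call whose
   estimated growth is 6 into a callee that itself makes two non-builtin
   calls is rejected, because 6 * (2 + 1) = 18 exceeds the parameter,
   while the same call into a leaf callee is accepted since 6 <= 14.  */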
509 /* Compute time of the edge->caller + edge->callee execution when inlining
510 does not happen. */
512 inline gcov_type
513 compute_uninlined_call_time (struct inline_summary *callee_info,
514 struct cgraph_edge *edge)
516 gcov_type uninlined_call_time =
517 RDIV ((gcov_type)callee_info->time * MAX (edge->frequency, 1),
518 CGRAPH_FREQ_BASE);
519 gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
520 ? edge->caller->global.inlined_to
521 : edge->caller)->time;
522 return uninlined_call_time + caller_time;
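/* Worked example (figures are illustrative): with callee_info->time of 200,
   an edge frequency of CGRAPH_FREQ_BASE / 2 and a caller time of 500, the
   uninlined estimate is 200 * (CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
   + 500 = 100 + 500 = 600 units of inline summary time.  */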
525 /* Same as compute_uninlined_call_time but compute the time when inlining
526 does happen. */
528 inline gcov_type
529 compute_inlined_call_time (struct cgraph_edge *edge,
530 int edge_time)
532 gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
533 ? edge->caller->global.inlined_to
534 : edge->caller)->time;
535 gcov_type time = (caller_time
536 + RDIV (((gcov_type) edge_time
537 - inline_edge_summary (edge)->call_stmt_time)
538 * MAX (edge->frequency, 1), CGRAPH_FREQ_BASE));
539 /* Possibly one roundoff error, but watch for overflows. */
540 gcc_checking_assert (time >= INT_MIN / 2);
541 if (time < 0)
542 time = 0;
543 return time;
546 /* Return true if the speedup for inlining E is bigger than
547 PARAM_MAX_INLINE_MIN_SPEEDUP. */
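/* For instance (hypothetical parameter value): with --param
   inline-min-speedup set to 10, an uninlined time of 600 and an inlined
   time of 500 count as a big speedup, because the difference 100 exceeds
   600 * 10 / 100 = 60.  */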
549 static bool
550 big_speedup_p (struct cgraph_edge *e)
552 gcov_type time = compute_uninlined_call_time (inline_summary (e->callee), e);
554 gcov_type inlined_time = compute_inlined_call_time (e,
555 estimate_edge_time (e));
556 if (time - inlined_time
557 > RDIV (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP), 100))
558 return true;
559 return false;
562 /* Return true if we are interested in inlining a small function.
563 When REPORT is true, report the reason to the dump file. */
565 static bool
566 want_inline_small_function_p (struct cgraph_edge *e, bool report)
568 bool want_inline = true;
569 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
571 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
573 else if (!DECL_DECLARED_INLINE_P (callee->decl)
574 && !flag_inline_small_functions)
576 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
577 want_inline = false;
579 /* Do a fast and conservative check whether the function can be a good
580 inline candidate. At the moment we allow inline hints to
581 promote a non-inline function to inline and we increase
582 MAX_INLINE_INSNS_SINGLE 16-fold for inline functions. */
583 else if ((!DECL_DECLARED_INLINE_P (callee->decl)
584 && (!e->count || !e->maybe_hot_p ()))
585 && inline_summary (callee)->min_size - inline_edge_summary (e)->call_stmt_size
586 > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
588 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
589 want_inline = false;
591 else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
592 && inline_summary (callee)->min_size - inline_edge_summary (e)->call_stmt_size
593 > 16 * MAX_INLINE_INSNS_SINGLE)
595 e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
596 ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
597 : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
598 want_inline = false;
600 else
602 int growth = estimate_edge_growth (e);
603 inline_hints hints = estimate_edge_hints (e);
604 bool big_speedup = big_speedup_p (e);
606 if (growth <= 0)
608 /* Apply the MAX_INLINE_INSNS_SINGLE limit. Do not do so when
609 hints suggest that inlining the given function is very profitable. */
610 else if (DECL_DECLARED_INLINE_P (callee->decl)
611 && growth >= MAX_INLINE_INSNS_SINGLE
612 && ((!big_speedup
613 && !(hints & (INLINE_HINT_indirect_call
614 | INLINE_HINT_known_hot
615 | INLINE_HINT_loop_iterations
616 | INLINE_HINT_array_index
617 | INLINE_HINT_loop_stride)))
618 || growth >= MAX_INLINE_INSNS_SINGLE * 16))
620 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
621 want_inline = false;
623 else if (!DECL_DECLARED_INLINE_P (callee->decl)
624 && !flag_inline_functions)
626 /* growth_likely_positive is expensive, always test it last. */
627 if (growth >= MAX_INLINE_INSNS_SINGLE
628 || growth_likely_positive (callee, growth))
630 e->inline_failed = CIF_NOT_DECLARED_INLINED;
631 want_inline = false;
634 /* Apply the MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
635 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
636 inlining the given function is very profitable. */
637 else if (!DECL_DECLARED_INLINE_P (callee->decl)
638 && !big_speedup
639 && !(hints & INLINE_HINT_known_hot)
640 && growth >= ((hints & (INLINE_HINT_indirect_call
641 | INLINE_HINT_loop_iterations
642 | INLINE_HINT_array_index
643 | INLINE_HINT_loop_stride))
644 ? MAX (MAX_INLINE_INSNS_AUTO,
645 MAX_INLINE_INSNS_SINGLE)
646 : MAX_INLINE_INSNS_AUTO))
648 /* growth_likely_positive is expensive, always test it last. */
649 if (growth >= MAX_INLINE_INSNS_SINGLE
650 || growth_likely_positive (callee, growth))
652 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
653 want_inline = false;
656 /* If the call is cold, do not inline when the function body would grow. */
657 else if (!e->maybe_hot_p ()
658 && (growth >= MAX_INLINE_INSNS_SINGLE
659 || growth_likely_positive (callee, growth)))
661 e->inline_failed = CIF_UNLIKELY_CALL;
662 want_inline = false;
665 if (!want_inline && report)
666 report_inline_failed_reason (e);
667 return want_inline;
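/* Putting the limits above together, with illustrative values (not
   necessarily the compiled-in defaults) of MAX_INLINE_INSNS_SINGLE == 400
   and MAX_INLINE_INSNS_AUTO == 40: a function not declared inline with an
   estimated growth of 50 is rejected unless a hint such as
   INLINE_HINT_loop_iterations or a big speedup upgrades its limit to the
   SINGLE one, while a declared-inline function is accepted up to growth
   400, or 400 * 16 when strong hints or a big speedup are present.  */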
670 /* EDGE is a self-recursive edge.
671 We handle two cases - when function A is inlined into itself
672 or when function A is being inlined into another inlined copy of function
673 A within function B.
675 In the first case OUTER_NODE points to the toplevel copy of A, while
676 in the second case OUTER_NODE points to the outermost copy of A in B.
678 In both cases we want to be extra selective since
679 inlining the call will just cause new recursive calls to appear. */
681 static bool
682 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
683 struct cgraph_node *outer_node,
684 bool peeling,
685 int depth)
687 char const *reason = NULL;
688 bool want_inline = true;
689 int caller_freq = CGRAPH_FREQ_BASE;
690 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
692 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
693 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
695 if (!edge->maybe_hot_p ())
697 reason = "recursive call is cold";
698 want_inline = false;
700 else if (max_count && !outer_node->count)
702 reason = "not executed in profile";
703 want_inline = false;
705 else if (depth > max_depth)
707 reason = "--param max-inline-recursive-depth exceeded.";
708 want_inline = false;
711 if (outer_node->global.inlined_to)
712 caller_freq = outer_node->callers->frequency;
714 if (!caller_freq)
716 reason = "function is inlined and unlikely";
717 want_inline = false;
720 if (!want_inline)
722 /* Inlining of a self-recursive function into a copy of itself within another
723 function is a transformation similar to loop peeling.
725 Peeling is profitable if we can inline enough copies to make the probability
726 of an actual call to the self-recursive function very small. Be sure that
727 the probability of recursion is small.
729 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
730 This way the expected number of recursions is at most max_depth. */
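/* Numeric illustration: with max_depth == 8, max_prob starts at roughly
   0.875 * CGRAPH_FREQ_BASE.  The loop below squares it (as a fraction of
   CGRAPH_FREQ_BASE) once per extra level of depth, so at depth 3 the
   recursive call's frequency must stay below about 0.875^4, i.e. ~59% of
   the caller frequency, for peeling to continue.  */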
731 else if (peeling)
733 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
734 / max_depth);
735 int i;
736 for (i = 1; i < depth; i++)
737 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
738 if (max_count
739 && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
740 >= max_prob))
742 reason = "profile of recursive call is too large";
743 want_inline = false;
745 if (!max_count
746 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
747 >= max_prob))
749 reason = "frequency of recursive call is too large";
750 want_inline = false;
753 /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the
754 recursion depth is large. We reduce function call overhead and increase the
755 chances that things fit in the hardware return predictor.
757 Recursive inlining might however increase the cost of stack frame setup,
758 actually slowing down functions whose recursion tree is wide rather than
759 deep.
761 Deciding reliably on when to do recursive inlining without profile feedback
762 is tricky. For now we disable recursive inlining when the probability of
763 self recursion is low.
765 Recursive inlining of a self-recursive call within a loop also results in
766 large loop depths that generally optimize badly. We may want to throttle
767 down inlining in those cases. In particular this seems to happen in one of
768 the libstdc++ rb tree methods. */
769 else
771 if (max_count
772 && (edge->count * 100 / outer_node->count
773 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
775 reason = "profile of recursive call is too small";
776 want_inline = false;
778 else if (!max_count
779 && (edge->frequency * 100 / caller_freq
780 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
782 reason = "frequency of recursive call is too small";
783 want_inline = false;
786 if (!want_inline && dump_file)
787 fprintf (dump_file, " not inlining recursively: %s\n", reason);
788 return want_inline;
791 /* Return true when NODE has an uninlinable caller;
792 set HAS_HOT_CALL if it has a hot call.
793 Worker for cgraph_for_node_and_aliases. */
795 static bool
796 check_callers (struct cgraph_node *node, void *has_hot_call)
798 struct cgraph_edge *e;
799 for (e = node->callers; e; e = e->next_caller)
801 if (!can_inline_edge_p (e, true))
802 return true;
803 if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
804 *(bool *)has_hot_call = true;
806 return false;
809 /* If NODE has a caller, return true. */
811 static bool
812 has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
814 if (node->callers)
815 return true;
816 return false;
819 /* Decide if inlining NODE would reduce unit size by eliminating
820 the offline copy of function.
821 When COLD is true the cold calls are considered, too. */
823 static bool
824 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
826 struct cgraph_node *function = node->ultimate_alias_target ();
827 bool has_hot_call = false;
829 /* Does it have callers? */
830 if (!node->call_for_symbol_thunks_and_aliases (has_caller_p, NULL, true))
831 return false;
832 /* Already inlined? */
833 if (function->global.inlined_to)
834 return false;
835 if (node->ultimate_alias_target () != node)
836 return false;
837 /* Inlining into all callers would increase size? */
838 if (estimate_growth (node) > 0)
839 return false;
840 /* All inlines must be possible. */
841 if (node->call_for_symbol_thunks_and_aliases
842 (check_callers, &has_hot_call, true))
843 return false;
844 if (!cold && !has_hot_call)
845 return false;
846 return true;
849 #define RELATIVE_TIME_BENEFIT_RANGE (INT_MAX / 64)
851 /* Return relative time improvement for inlining EDGE in range
852 1...RELATIVE_TIME_BENEFIT_RANGE */
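/* Worked example (figures are illustrative): if the call takes 600 time
   units when not inlined and 450 when inlined, the relative benefit is
   (600 - 450) * RELATIVE_TIME_BENEFIT_RANGE / 600, i.e. one quarter of the
   range, and the result is clamped to [1, RELATIVE_TIME_BENEFIT_RANGE].  */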
854 static inline int
855 relative_time_benefit (struct inline_summary *callee_info,
856 struct cgraph_edge *edge,
857 int edge_time)
859 gcov_type relbenefit;
860 gcov_type uninlined_call_time = compute_uninlined_call_time (callee_info, edge);
861 gcov_type inlined_call_time = compute_inlined_call_time (edge, edge_time);
863 /* Inlining into extern inline function is not a win. */
864 if (DECL_EXTERNAL (edge->caller->global.inlined_to
865 ? edge->caller->global.inlined_to->decl
866 : edge->caller->decl))
867 return 1;
869 /* Watch overflows. */
870 gcc_checking_assert (uninlined_call_time >= 0);
871 gcc_checking_assert (inlined_call_time >= 0);
872 gcc_checking_assert (uninlined_call_time >= inlined_call_time);
874 /* Compute relative time benefit, i.e. how much the call becomes faster.
875 ??? perhaps computing how much the caller+callee together become faster
876 would lead to more realistic results. */
877 if (!uninlined_call_time)
878 uninlined_call_time = 1;
879 relbenefit =
880 RDIV (((gcov_type)uninlined_call_time - inlined_call_time) * RELATIVE_TIME_BENEFIT_RANGE,
881 uninlined_call_time);
882 relbenefit = MIN (relbenefit, RELATIVE_TIME_BENEFIT_RANGE);
883 gcc_checking_assert (relbenefit >= 0);
884 relbenefit = MAX (relbenefit, 1);
885 return relbenefit;
889 /* A cost model driving the inlining heuristics in a way such that the edges
890 with the smallest badness are inlined first. After each inlining the
891 costs of all caller edges of the affected nodes are recomputed so the
892 metrics may accurately depend on values such as the number of inlinable
893 callers of the function or the function body size. */
895 static int
896 edge_badness (struct cgraph_edge *edge, bool dump)
898 gcov_type badness;
899 int growth, edge_time;
900 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
901 struct inline_summary *callee_info = inline_summary (callee);
902 inline_hints hints;
904 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
905 return INT_MIN;
907 growth = estimate_edge_growth (edge);
908 edge_time = estimate_edge_time (edge);
909 hints = estimate_edge_hints (edge);
910 gcc_checking_assert (edge_time >= 0);
911 gcc_checking_assert (edge_time <= callee_info->time);
912 gcc_checking_assert (growth <= callee_info->size);
914 if (dump)
916 fprintf (dump_file, " Badness calculation for %s/%i -> %s/%i\n",
917 xstrdup (edge->caller->name ()),
918 edge->caller->order,
919 xstrdup (callee->name ()),
920 edge->callee->order);
921 fprintf (dump_file, " size growth %i, time %i ",
922 growth,
923 edge_time);
924 dump_inline_hints (dump_file, hints);
925 if (big_speedup_p (edge))
926 fprintf (dump_file, " big_speedup");
927 fprintf (dump_file, "\n");
930 /* Always prefer inlining that saves code size. */
931 if (growth <= 0)
933 badness = INT_MIN / 2 + growth;
934 if (dump)
935 fprintf (dump_file, " %i: Growth %i <= 0\n", (int) badness,
936 growth);
939 /* When profiling is available, compute badness as:
941 relative_edge_count * relative_time_benefit
942 goodness = -------------------------------------------
943 growth_f_caller
944 badness = -goodness
946 The fraction is upside down, because for edge counts and time benefits
947 the bounds are known. Edge growth is essentially unlimited. */
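/* Sketch with made-up numbers: an edge executed max_count / 4 times whose
   relative time benefit is half of RELATIVE_TIME_BENEFIT_RANGE and whose
   growth is 20 gets goodness proportional to (1/4 * 1/2) / 20 = 1/160 of
   the maximum; badness is that fraction scaled by INT_MAX / 2 and
   negated.  */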
949 else if (max_count)
951 sreal tmp, relbenefit_real, growth_real;
952 int relbenefit = relative_time_benefit (callee_info, edge, edge_time);
953 /* Capping edge->count to max_count. edge->count can be larger than
954 max_count if an inline adds new edges which increase max_count
955 after max_count is computed. */
956 gcov_type edge_count = edge->count > max_count ? max_count : edge->count;
958 sreal_init (&relbenefit_real, relbenefit, 0);
959 sreal_init (&growth_real, growth, 0);
961 /* relative_edge_count. */
962 sreal_init (&tmp, edge_count, 0);
963 sreal_div (&tmp, &tmp, &max_count_real);
965 /* relative_time_benefit. */
966 sreal_mul (&tmp, &tmp, &relbenefit_real);
967 sreal_div (&tmp, &tmp, &max_relbenefit_real);
969 /* growth_f_caller. */
970 sreal_mul (&tmp, &tmp, &half_int_min_real);
971 sreal_div (&tmp, &tmp, &growth_real);
973 badness = -1 * sreal_to_int (&tmp);
975 if (dump)
977 fprintf (dump_file,
978 " %i (relative %f): profile info. Relative count %f%s"
979 " * Relative benefit %f\n",
980 (int) badness, (double) badness / INT_MIN,
981 (double) edge_count / max_count,
982 edge->count > max_count ? " (capped to max_count)" : "",
983 relbenefit * 100.0 / RELATIVE_TIME_BENEFIT_RANGE);
987 /* When the function local profile is available, compute badness as:
989 relative_time_benefit
990 goodness = ---------------------------------
991 growth_of_caller * overall_growth
993 badness = - goodness
995 compensated by the inline hints. */
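/* Rough example (values are illustrative): a relative time benefit of half
   the range with caller growth 20 and callee overall growth 50 gives
   badness of about (INT_MIN / 16) / 2 / (20 * 50), a small negative
   number; the hint handling below then multiplies it by 8 (making the
   edge more attractive) or divides it by 16, 8 or 2 for same-SCC, in-SCC
   and cross-module edges, keeping the result within [INT_MIN / 2, 0].  */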
997 else if (flag_guess_branch_prob)
999 badness = (relative_time_benefit (callee_info, edge, edge_time)
1000 * (INT_MIN / 16 / RELATIVE_TIME_BENEFIT_RANGE));
1001 badness /= (MIN (65536/2, growth) * MIN (65536/2, MAX (1, callee_info->growth)));
1002 gcc_checking_assert (badness <=0 && badness >= INT_MIN / 16);
1003 if ((hints & (INLINE_HINT_indirect_call
1004 | INLINE_HINT_loop_iterations
1005 | INLINE_HINT_array_index
1006 | INLINE_HINT_loop_stride))
1007 || callee_info->growth <= 0)
1008 badness *= 8;
1009 if (hints & (INLINE_HINT_same_scc))
1010 badness /= 16;
1011 else if (hints & (INLINE_HINT_in_scc))
1012 badness /= 8;
1013 else if (hints & (INLINE_HINT_cross_module))
1014 badness /= 2;
1015 gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 2);
1016 if ((hints & INLINE_HINT_declared_inline) && badness >= INT_MIN / 32)
1017 badness *= 16;
1018 if (dump)
1020 fprintf (dump_file,
1021 " %i: guessed profile. frequency %f,"
1022 " benefit %f%%, time w/o inlining %i, time w inlining %i"
1023 " overall growth %i (current) %i (original)\n",
1024 (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE,
1025 relative_time_benefit (callee_info, edge, edge_time) * 100.0
1026 / RELATIVE_TIME_BENEFIT_RANGE,
1027 (int)compute_uninlined_call_time (callee_info, edge),
1028 (int)compute_inlined_call_time (edge, edge_time),
1029 estimate_growth (callee),
1030 callee_info->growth);
1033 /* When the function local profile is not available or it does not give
1034 useful information (i.e. the frequency is zero), base the cost on the
1035 loop nest and overall size growth, so we optimize for the overall number
1036 of functions fully inlined in the program. */
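/* E.g. an edge with estimated growth 12 at loop depth 3 gets badness
   (12 * 256) >> 3 = 384, while the same growth at the top level keeps
   badness 3072, so more deeply nested calls are preferred.  */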
1037 else
1039 int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
1040 badness = growth * 256;
1042 /* Decrease badness if call is nested. */
1043 if (badness > 0)
1044 badness >>= nest;
1045 else
1047 badness <<= nest;
1049 if (dump)
1050 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
1051 nest);
1054 /* Ensure that we did not overflow in all the fixed point math above. */
1055 gcc_assert (badness >= INT_MIN);
1056 gcc_assert (badness <= INT_MAX - 1);
1057 /* Make recursive inlining always happen after other inlining is done. */
1058 if (edge->recursive_p ())
1059 return badness + 1;
1060 else
1061 return badness;
1064 /* Recompute badness of EDGE and update its key in HEAP if needed. */
1065 static inline void
1066 update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
1068 int badness = edge_badness (edge, false);
1069 if (edge->aux)
1071 fibnode_t n = (fibnode_t) edge->aux;
1072 gcc_checking_assert (n->data == edge);
1074 /* fibheap_replace_key only decreases the keys.
1075 When we increase the key we do not update the heap
1076 and instead re-insert the element once it becomes
1077 a minimum of the heap. */
1078 if (badness < n->key)
1080 if (dump_file && (dump_flags & TDF_DETAILS))
1082 fprintf (dump_file,
1083 " decreasing badness %s/%i -> %s/%i, %i to %i\n",
1084 xstrdup (edge->caller->name ()),
1085 edge->caller->order,
1086 xstrdup (edge->callee->name ()),
1087 edge->callee->order,
1088 (int)n->key,
1089 badness);
1091 fibheap_replace_key (heap, n, badness);
1092 gcc_checking_assert (n->key == badness);
1095 else
1097 if (dump_file && (dump_flags & TDF_DETAILS))
1099 fprintf (dump_file,
1100 " enqueuing call %s/%i -> %s/%i, badness %i\n",
1101 xstrdup (edge->caller->name ()),
1102 edge->caller->order,
1103 xstrdup (edge->callee->name ()),
1104 edge->callee->order,
1105 badness);
1107 edge->aux = fibheap_insert (heap, badness, edge);
1112 /* NODE was inlined.
1113 All caller edges need to be reset because
1114 size estimates change. Similarly callees need to be reset
1115 because a better context may be known. */
1117 static void
1118 reset_edge_caches (struct cgraph_node *node)
1120 struct cgraph_edge *edge;
1121 struct cgraph_edge *e = node->callees;
1122 struct cgraph_node *where = node;
1123 struct ipa_ref *ref;
1125 if (where->global.inlined_to)
1126 where = where->global.inlined_to;
1128 /* WHERE's body size has changed, so the cached growth is invalid. */
1129 reset_node_growth_cache (where);
1131 for (edge = where->callers; edge; edge = edge->next_caller)
1132 if (edge->inline_failed)
1133 reset_edge_growth_cache (edge);
1135 FOR_EACH_ALIAS (where, ref)
1136 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
1138 if (!e)
1139 return;
1141 while (true)
1142 if (!e->inline_failed && e->callee->callees)
1143 e = e->callee->callees;
1144 else
1146 if (e->inline_failed)
1147 reset_edge_growth_cache (e);
1148 if (e->next_callee)
1149 e = e->next_callee;
1150 else
1154 if (e->caller == node)
1155 return;
1156 e = e->caller->callers;
1158 while (!e->next_callee);
1159 e = e->next_callee;
1164 /* Recompute HEAP nodes for each caller of NODE.
1165 UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
1166 When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
1167 it is inlinable. Otherwise check all edges. */
1169 static void
1170 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
1171 bitmap updated_nodes,
1172 struct cgraph_edge *check_inlinablity_for)
1174 struct cgraph_edge *edge;
1175 struct ipa_ref *ref;
1177 if ((!node->alias && !inline_summary (node)->inlinable)
1178 || node->global.inlined_to)
1179 return;
1180 if (!bitmap_set_bit (updated_nodes, node->uid))
1181 return;
1183 FOR_EACH_ALIAS (node, ref)
1185 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1186 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1189 for (edge = node->callers; edge; edge = edge->next_caller)
1190 if (edge->inline_failed)
1192 if (!check_inlinablity_for
1193 || check_inlinablity_for == edge)
1195 if (can_inline_edge_p (edge, false)
1196 && want_inline_small_function_p (edge, false))
1197 update_edge_key (heap, edge);
1198 else if (edge->aux)
1200 report_inline_failed_reason (edge);
1201 fibheap_delete_node (heap, (fibnode_t) edge->aux);
1202 edge->aux = NULL;
1205 else if (edge->aux)
1206 update_edge_key (heap, edge);
1210 /* Recompute HEAP nodes for each uninlined call in NODE.
1211 This is used when we know that edge badnesses are only going to increase
1212 (we introduced a new call site) and thus all we need is to insert the newly
1213 created edges into the heap. */
1215 static void
1216 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
1217 bitmap updated_nodes)
1219 struct cgraph_edge *e = node->callees;
1221 if (!e)
1222 return;
1223 while (true)
1224 if (!e->inline_failed && e->callee->callees)
1225 e = e->callee->callees;
1226 else
1228 enum availability avail;
1229 struct cgraph_node *callee;
1230 /* We do not reset the callee growth cache here. Since we added a new call,
1231 the growth could only have increased and consequently the badness metric
1232 does not need updating. */
1233 if (e->inline_failed
1234 && (callee = e->callee->ultimate_alias_target (&avail))
1235 && inline_summary (callee)->inlinable
1236 && avail >= AVAIL_AVAILABLE
1237 && !bitmap_bit_p (updated_nodes, callee->uid))
1239 if (can_inline_edge_p (e, false)
1240 && want_inline_small_function_p (e, false))
1241 update_edge_key (heap, e);
1242 else if (e->aux)
1244 report_inline_failed_reason (e);
1245 fibheap_delete_node (heap, (fibnode_t) e->aux);
1246 e->aux = NULL;
1249 if (e->next_callee)
1250 e = e->next_callee;
1251 else
1255 if (e->caller == node)
1256 return;
1257 e = e->caller->callers;
1259 while (!e->next_callee);
1260 e = e->next_callee;
1265 /* Enqueue all recursive calls from NODE into the priority queue, keyed by
1266 how much we want to recursively inline the call. */
1268 static void
1269 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1270 fibheap_t heap)
1272 struct cgraph_edge *e;
1273 enum availability avail;
1275 for (e = where->callees; e; e = e->next_callee)
1276 if (e->callee == node
1277 || (e->callee->ultimate_alias_target (&avail) == node
1278 && avail > AVAIL_INTERPOSABLE))
1280 /* When profile feedback is available, prioritize by expected number
1281 of calls. */
1282 fibheap_insert (heap,
1283 !max_count ? -e->frequency
1284 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))), e);
1287 for (e = where->callees; e; e = e->next_callee)
1288 if (!e->inline_failed)
1289 lookup_recursive_calls (node, e->callee, heap);
1292 /* Decide on recursive inlining: in case the function has recursive calls,
1293 inline until the body size reaches the given limit. If any new indirect
1294 edges are discovered in the process, add them to *NEW_EDGES, unless
1295 NEW_EDGES is NULL. */
1297 static bool
1298 recursive_inlining (struct cgraph_edge *edge,
1299 vec<cgraph_edge *> *new_edges)
1301 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1302 fibheap_t heap;
1303 struct cgraph_node *node;
1304 struct cgraph_edge *e;
1305 struct cgraph_node *master_clone = NULL, *next;
1306 int depth = 0;
1307 int n = 0;
1309 node = edge->caller;
1310 if (node->global.inlined_to)
1311 node = node->global.inlined_to;
1313 if (DECL_DECLARED_INLINE_P (node->decl))
1314 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1316 /* Make sure that function is small enough to be considered for inlining. */
1317 if (estimate_size_after_inlining (node, edge) >= limit)
1318 return false;
1319 heap = fibheap_new ();
1320 lookup_recursive_calls (node, node, heap);
1321 if (fibheap_empty (heap))
1323 fibheap_delete (heap);
1324 return false;
1327 if (dump_file)
1328 fprintf (dump_file,
1329 " Performing recursive inlining on %s\n",
1330 node->name ());
1332 /* Do the inlining and update the list of recursive calls during the process. */
1333 while (!fibheap_empty (heap))
1335 struct cgraph_edge *curr
1336 = (struct cgraph_edge *) fibheap_extract_min (heap);
1337 struct cgraph_node *cnode, *dest = curr->callee;
1339 if (!can_inline_edge_p (curr, true))
1340 continue;
1342 /* MASTER_CLONE is produced in the case we already started modifying
1343 the function. Be sure to redirect the edge to the original body before
1344 estimating growths, otherwise we will be seeing growths after inlining
1345 the already modified body. */
1346 if (master_clone)
1348 curr->redirect_callee (master_clone);
1349 reset_edge_growth_cache (curr);
1352 if (estimate_size_after_inlining (node, curr) > limit)
1354 curr->redirect_callee (dest);
1355 reset_edge_growth_cache (curr);
1356 break;
1359 depth = 1;
1360 for (cnode = curr->caller;
1361 cnode->global.inlined_to; cnode = cnode->callers->caller)
1362 if (node->decl
1363 == curr->callee->ultimate_alias_target ()->decl)
1364 depth++;
1366 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1368 curr->redirect_callee (dest);
1369 reset_edge_growth_cache (curr);
1370 continue;
1373 if (dump_file)
1375 fprintf (dump_file,
1376 " Inlining call of depth %i", depth);
1377 if (node->count)
1379 fprintf (dump_file, " called approx. %.2f times per call",
1380 (double)curr->count / node->count);
1382 fprintf (dump_file, "\n");
1384 if (!master_clone)
1386 /* We need original clone to copy around. */
1387 master_clone = node->create_clone (node->decl, node->count,
1388 CGRAPH_FREQ_BASE, false, vNULL,
1389 true, NULL, NULL);
1390 for (e = master_clone->callees; e; e = e->next_callee)
1391 if (!e->inline_failed)
1392 clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
1393 curr->redirect_callee (master_clone);
1394 reset_edge_growth_cache (curr);
1397 inline_call (curr, false, new_edges, &overall_size, true);
1398 lookup_recursive_calls (node, curr->callee, heap);
1399 n++;
1402 if (!fibheap_empty (heap) && dump_file)
1403 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1404 fibheap_delete (heap);
1406 if (!master_clone)
1407 return false;
1409 if (dump_file)
1410 fprintf (dump_file,
1411 "\n Inlined %i times, "
1412 "body grown from size %i to %i, time %i to %i\n", n,
1413 inline_summary (master_clone)->size, inline_summary (node)->size,
1414 inline_summary (master_clone)->time, inline_summary (node)->time);
1416 /* Remove the master clone we used for inlining. We rely on clones inlined
1417 into the master clone getting queued just before the master clone, so we
1418 don't need recursion. */
1419 for (node = symtab->first_function (); node != master_clone;
1420 node = next)
1422 next = symtab->next_function (node);
1423 if (node->global.inlined_to == master_clone)
1424 node->remove ();
1426 master_clone->remove ();
1427 return true;
1431 /* Given whole compilation unit estimate of INSNS, compute how large we can
1432 allow the unit to grow. */
1434 static int
1435 compute_max_insns (int insns)
1437 int max_insns = insns;
1438 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1439 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1441 return ((int64_t) max_insns
1442 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
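/* For example (hypothetical parameter values): with a unit size estimate of
   50000 instructions and --param inline-unit-growth set to 30, the unit may
   grow to 50000 * 130 / 100 = 65000 instructions; for units smaller than
   --param large-unit-insns, that parameter is used as the base instead.  */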
1446 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1448 static void
1449 add_new_edges_to_heap (fibheap_t heap, vec<cgraph_edge *> new_edges)
1451 while (new_edges.length () > 0)
1453 struct cgraph_edge *edge = new_edges.pop ();
1455 gcc_assert (!edge->aux);
1456 if (edge->inline_failed
1457 && can_inline_edge_p (edge, true)
1458 && want_inline_small_function_p (edge, true))
1459 edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
1463 /* Remove EDGE from the fibheap. */
1465 static void
1466 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1468 if (e->callee)
1469 reset_node_growth_cache (e->callee);
1470 if (e->aux)
1472 fibheap_delete_node ((fibheap_t)data, (fibnode_t)e->aux);
1473 e->aux = NULL;
1477 /* Return true if speculation of edge E seems useful.
1478 If ANTICIPATE_INLINING is true, be conservative and hope that E
1479 may get inlined. */
1481 bool
1482 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1484 enum availability avail;
1485 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail);
1486 struct cgraph_edge *direct, *indirect;
1487 struct ipa_ref *ref;
1489 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1491 if (!e->maybe_hot_p ())
1492 return false;
1494 /* See if IP optimizations found something potentially useful about the
1495 function. For now we look only for CONST/PURE flags. Almost everything
1496 else we propagate is useless. */
1497 if (avail >= AVAIL_AVAILABLE)
1499 int ecf_flags = flags_from_decl_or_type (target->decl);
1500 if (ecf_flags & ECF_CONST)
1502 e->speculative_call_info (direct, indirect, ref);
1503 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1504 return true;
1506 else if (ecf_flags & ECF_PURE)
1508 e->speculative_call_info (direct, indirect, ref);
1509 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1510 return true;
1513 /* If we did not manage to inline the function nor redirect
1514 to an ipa-cp clone (those are recognized by the local flag being set),
1515 it is probably pointless to inline it unless the hardware is missing an
1516 indirect call predictor. */
1517 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1518 return false;
1519 /* For overwritable targets there is not much to do. */
1520 if (e->inline_failed && !can_inline_edge_p (e, false, true))
1521 return false;
1522 /* OK, speculation seems interesting. */
1523 return true;
1526 /* We know that EDGE is not going to be inlined.
1527 See if we can remove speculation. */
1529 static void
1530 resolve_noninline_speculation (fibheap_t edge_heap, struct cgraph_edge *edge)
1532 if (edge->speculative && !speculation_useful_p (edge, false))
1534 struct cgraph_node *node = edge->caller;
1535 struct cgraph_node *where = node->global.inlined_to
1536 ? node->global.inlined_to : node;
1537 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1539 spec_rem += edge->count;
1540 edge->resolve_speculation ();
1541 reset_edge_caches (where);
1542 inline_update_overall_summary (where);
1543 update_caller_keys (edge_heap, where,
1544 updated_nodes, NULL);
1545 update_callee_keys (edge_heap, where,
1546 updated_nodes);
1547 BITMAP_FREE (updated_nodes);
1551 /* We use a greedy algorithm for inlining small functions:
1552 all inline candidates are put into a prioritized heap ordered by
1553 increasing badness.
1555 The inlining of small functions is bounded by the unit growth parameters. */
1557 static void
1558 inline_small_functions (void)
1560 struct cgraph_node *node;
1561 struct cgraph_edge *edge;
1562 fibheap_t edge_heap = fibheap_new ();
1563 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1564 int min_size, max_size;
1565 auto_vec<cgraph_edge *> new_indirect_edges;
1566 int initial_size = 0;
1567 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1568 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1569 if (flag_indirect_inlining)
1570 new_indirect_edges.create (8);
1572 edge_removal_hook_holder
1573 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, edge_heap);
1575 /* Compute overall unit size and other global parameters used by badness
1576 metrics. */
1578 max_count = 0;
1579 ipa_reduced_postorder (order, true, true, NULL);
1580 free (order);
1582 FOR_EACH_DEFINED_FUNCTION (node)
1583 if (!node->global.inlined_to)
1585 if (node->has_gimple_body_p ()
1586 || node->thunk.thunk_p)
1588 struct inline_summary *info = inline_summary (node);
1589 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1591 /* Do not account external functions, they will be optimized out
1592 if not inlined. Also only count the non-cold portion of program. */
1593 if (!DECL_EXTERNAL (node->decl)
1594 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED)
1595 initial_size += info->size;
1596 info->growth = estimate_growth (node);
1597 if (dfs && dfs->next_cycle)
1599 struct cgraph_node *n2;
1600 int id = dfs->scc_no + 1;
1601 for (n2 = node; n2;
1602 n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle)
1604 struct inline_summary *info2 = inline_summary (n2);
1605 if (info2->scc_no)
1606 break;
1607 info2->scc_no = id;
1612 for (edge = node->callers; edge; edge = edge->next_caller)
1613 if (max_count < edge->count)
1614 max_count = edge->count;
1616 sreal_init (&max_count_real, max_count, 0);
1617 sreal_init (&max_relbenefit_real, RELATIVE_TIME_BENEFIT_RANGE, 0);
1618 sreal_init (&half_int_min_real, INT_MAX / 2, 0);
1619 ipa_free_postorder_info ();
1620 initialize_growth_caches ();
1622 if (dump_file)
1623 fprintf (dump_file,
1624 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1625 initial_size);
1627 overall_size = initial_size;
1628 max_size = compute_max_insns (overall_size);
1629 min_size = overall_size;
1631 /* Populate the heap with all edges we might inline. */
1633 FOR_EACH_DEFINED_FUNCTION (node)
1635 bool update = false;
1636 struct cgraph_edge *next;
1638 if (dump_file)
1639 fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
1640 node->name (), node->order);
1642 for (edge = node->callees; edge; edge = next)
1644 next = edge->next_callee;
1645 if (edge->inline_failed
1646 && !edge->aux
1647 && can_inline_edge_p (edge, true)
1648 && want_inline_small_function_p (edge, true)
1649 && edge->inline_failed)
1651 gcc_assert (!edge->aux);
1652 update_edge_key (edge_heap, edge);
1654 if (edge->speculative && !speculation_useful_p (edge, edge->aux != NULL))
1656 edge->resolve_speculation ();
1657 update = true;
1660 if (update)
1662 struct cgraph_node *where = node->global.inlined_to
1663 ? node->global.inlined_to : node;
1664 inline_update_overall_summary (where);
1665 reset_node_growth_cache (where);
1666 reset_edge_caches (where);
1667 update_caller_keys (edge_heap, where,
1668 updated_nodes, NULL);
1669 bitmap_clear (updated_nodes);
1673 gcc_assert (in_lto_p
1674 || !max_count
1675 || (profile_info && flag_branch_probabilities));
1677 while (!fibheap_empty (edge_heap))
1679 int old_size = overall_size;
1680 struct cgraph_node *where, *callee;
1681 int badness = fibheap_min_key (edge_heap);
1682 int current_badness;
1683 int cached_badness;
1684 int growth;
1686 edge = (struct cgraph_edge *) fibheap_extract_min (edge_heap);
1687 gcc_assert (edge->aux);
1688 edge->aux = NULL;
1689 if (!edge->inline_failed || !edge->callee->analyzed)
1690 continue;
1692 /* Be sure that the caches are maintained consistently.
1693 We cannot make this ENABLE_CHECKING-only because it causes different
1694 updates of the fibheap queue. */
1695 cached_badness = edge_badness (edge, false);
1696 reset_edge_growth_cache (edge);
1697 reset_node_growth_cache (edge->callee);
1699 /* When updating the edge costs, we only decrease badness in the keys.
1700 Increases of badness are handled lazily; when we see a key with an
1701 out-of-date value on it, we re-insert it now. */
1702 current_badness = edge_badness (edge, false);
1703 gcc_assert (cached_badness == current_badness);
1704 gcc_assert (current_badness >= badness);
1705 if (current_badness != badness)
1707 edge->aux = fibheap_insert (edge_heap, current_badness, edge);
1708 continue;
1711 if (!can_inline_edge_p (edge, true))
1713 resolve_noninline_speculation (edge_heap, edge);
1714 continue;
1717 callee = edge->callee->ultimate_alias_target ();
1718 growth = estimate_edge_growth (edge);
1719 if (dump_file)
1721 fprintf (dump_file,
1722 "\nConsidering %s/%i with %i size\n",
1723 callee->name (), callee->order,
1724 inline_summary (callee)->size);
1725 fprintf (dump_file,
1726 " to be inlined into %s/%i in %s:%i\n"
1727 " Estimated badness is %i, frequency %.2f.\n",
1728 edge->caller->name (), edge->caller->order,
1729 flag_wpa ? "unknown"
1730 : gimple_filename ((const_gimple) edge->call_stmt),
1731 flag_wpa ? -1
1732 : gimple_lineno ((const_gimple) edge->call_stmt),
1733 badness,
1734 edge->frequency / (double)CGRAPH_FREQ_BASE);
1735 if (edge->count)
1736 fprintf (dump_file," Called %"PRId64"x\n",
1737 edge->count);
1738 if (dump_flags & TDF_DETAILS)
1739 edge_badness (edge, true);
1742 if (overall_size + growth > max_size
1743 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1745 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1746 report_inline_failed_reason (edge);
1747 resolve_noninline_speculation (edge_heap, edge);
1748 continue;
1751 if (!want_inline_small_function_p (edge, true))
1753 resolve_noninline_speculation (edge_heap, edge);
1754 continue;
1757 /* Heuristics for inlining small functions work poorly for
1758 recursive calls where we achieve effects similar to loop unrolling.
1759 When inlining such an edge seems profitable, leave the decision to the
1760 special-purpose recursive inliner. */
1761 if (edge->recursive_p ())
1763 where = edge->caller;
1764 if (where->global.inlined_to)
1765 where = where->global.inlined_to;
1766 if (!recursive_inlining (edge,
1767 flag_indirect_inlining
1768 ? &new_indirect_edges : NULL))
1770 edge->inline_failed = CIF_RECURSIVE_INLINING;
1771 resolve_noninline_speculation (edge_heap, edge);
1772 continue;
1774 reset_edge_caches (where);
1775 /* Recursive inliner inlines all recursive calls of the function
1776 at once. Consequently we need to update all callee keys. */
1777 if (flag_indirect_inlining)
1778 add_new_edges_to_heap (edge_heap, new_indirect_edges);
1779 update_callee_keys (edge_heap, where, updated_nodes);
1780 bitmap_clear (updated_nodes);
1782 else
1784 struct cgraph_node *outer_node = NULL;
1785 int depth = 0;
1787 /* Consider the case where the self-recursive function A is inlined
1788 into B. This is a desired optimization in some cases, since it
1789 leads to an effect similar to loop peeling and we might completely
1790 optimize out the recursive call. However we must be extra
1791 selective. */
1793 where = edge->caller;
1794 while (where->global.inlined_to)
1796 if (where->decl == callee->decl)
1797 outer_node = where, depth++;
1798 where = where->callers->caller;
1800 if (outer_node
1801 && !want_inline_self_recursive_call_p (edge, outer_node,
1802 true, depth))
1804 edge->inline_failed
1805 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
1806 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1807 resolve_noninline_speculation (edge_heap, edge);
1808 continue;
1810 else if (depth && dump_file)
1811 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
1813 gcc_checking_assert (!callee->global.inlined_to);
1814 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
1815 if (flag_indirect_inlining)
1816 add_new_edges_to_heap (edge_heap, new_indirect_edges);
1818 reset_edge_caches (edge->callee);
1819 reset_node_growth_cache (callee);
1821 update_callee_keys (edge_heap, where, updated_nodes);
1823 where = edge->caller;
1824 if (where->global.inlined_to)
1825 where = where->global.inlined_to;
1827 /* Our profitability metric can depend on local properties
1828 such as the number of inlinable calls and the size of the function body.
1829 After inlining these properties might change for the function we
1830 inlined into (since its body size changed) and for the functions
1831 called by the function we inlined (since the number of their inlinable
1832 callers might change). */
1833 update_caller_keys (edge_heap, where, updated_nodes, NULL);
1834 bitmap_clear (updated_nodes);
1836 if (dump_file)
1838 fprintf (dump_file,
1839 " Inlined into %s which now has time %i and size %i,"
1840 "net change of %+i.\n",
1841 edge->caller->name (),
1842 inline_summary (edge->caller)->time,
1843 inline_summary (edge->caller)->size,
1844 overall_size - old_size);
1846 if (min_size > overall_size)
1848 min_size = overall_size;
1849 max_size = compute_max_insns (min_size);
1851 if (dump_file)
1852 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1856 free_growth_caches ();
1857 fibheap_delete (edge_heap);
1858 if (dump_file)
1859 fprintf (dump_file,
1860 "Unit growth for small function inlining: %i->%i (%i%%)\n",
1861 initial_size, overall_size,
1862 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
1863 BITMAP_FREE (updated_nodes);
1864 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
1867 /* Flatten NODE. Performed both during early inlining and
1868 at IPA inlining time. */
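/* A user-level illustration (hypothetical example, not from the testsuite):
   marking a function with the "flatten" attribute requests that every call
   reachable from its body be inlined into it, which is what
   flatten_function implements:

     static int add (int a, int b) { return a + b; }
     static int twice (int x) { return add (x, x); }

     __attribute__ ((flatten))
     int compute (int x)
     {
       return twice (add (x, 1));
     }

   Both add and twice (and the call to add inside twice) are inlined into
   compute, subject to can_inline_edge_p.  */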
1870 static void
1871 flatten_function (struct cgraph_node *node, bool early)
1873 struct cgraph_edge *e;
1875 /* We shouldn't be called recursively when we are being processed. */
1876 gcc_assert (node->aux == NULL);
1878 node->aux = (void *) node;
1880 for (e = node->callees; e; e = e->next_callee)
1882 struct cgraph_node *orig_callee;
1883 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
1885 /* We've hit a cycle? It is time to give up. */
1886 if (callee->aux)
1888 if (dump_file)
1889 fprintf (dump_file,
1890 "Not inlining %s into %s to avoid cycle.\n",
1891 xstrdup (callee->name ()),
1892 xstrdup (e->caller->name ()));
1893 e->inline_failed = CIF_RECURSIVE_INLINING;
1894 continue;
1897 /* When the edge is already inlined, we just need to recurse into
1898 it in order to fully flatten the leaves. */
1899 if (!e->inline_failed)
1901 flatten_function (callee, early);
1902 continue;
1905 /* The flatten attribute needs to be processed during late inlining. For
1906 extra code quality, however, we do flattening during early optimization
1907 too. */
1908 if (!early
1909 ? !can_inline_edge_p (e, true)
1910 : !can_early_inline_edge_p (e))
1911 continue;
1913 if (e->recursive_p ())
1915 if (dump_file)
1916 fprintf (dump_file, "Not inlining: recursive call.\n");
1917 continue;
1920 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1921 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
1923 if (dump_file)
1924 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1925 continue;
1928 /* Inline the edge and flatten the inline clone. Avoid
1929 recursing through the original node if the node was cloned. */
1930 if (dump_file)
1931 fprintf (dump_file, " Inlining %s into %s.\n",
1932 xstrdup (callee->name ()),
1933 xstrdup (e->caller->name ()));
1934 orig_callee = callee;
1935 inline_call (e, true, NULL, NULL, false);
1936 if (e->callee != orig_callee)
1937 orig_callee->aux = (void *) node;
1938 flatten_function (e->callee, early);
1939 if (e->callee != orig_callee)
1940 orig_callee->aux = NULL;
1943 node->aux = NULL;
1944 if (!node->global.inlined_to)
1945 inline_update_overall_summary (node);
1948 /* Count the number of callers of NODE and store it into DATA (which
1949 points to an int). Worker for cgraph_for_node_and_aliases. */
1951 static bool
1952 sum_callers (struct cgraph_node *node, void *data)
1954 struct cgraph_edge *e;
1955 int *num_calls = (int *)data;
1957 for (e = node->callers; e; e = e->next_caller)
1958 (*num_calls)++;
1959 return false;
1962 /* Inline NODE into all callers. Worker for cgraph_for_node_and_aliases.
1963 DATA points to the number of calls originally found, so we avoid infinite
1964 recursion. */
1966 static bool
1967 inline_to_all_callers (struct cgraph_node *node, void *data)
1969 int *num_calls = (int *)data;
1970 bool callee_removed = false;
1972 while (node->callers && !node->global.inlined_to)
1974 struct cgraph_node *caller = node->callers->caller;
1976 if (dump_file)
1978 fprintf (dump_file,
1979 "\nInlining %s size %i.\n",
1980 node->name (),
1981 inline_summary (node)->size);
1982 fprintf (dump_file,
1983 " Called once from %s %i insns.\n",
1984 node->callers->caller->name (),
1985 inline_summary (node->callers->caller)->size);
1988 inline_call (node->callers, true, NULL, NULL, true, &callee_removed);
1989 if (dump_file)
1990 fprintf (dump_file,
1991 " Inlined into %s which now has %i size\n",
1992 caller->name (),
1993 inline_summary (caller)->size);
1994 if (!(*num_calls)--)
1996 if (dump_file)
1997 fprintf (dump_file, "New calls found; giving up.\n");
1998 return callee_removed;
2000 if (callee_removed)
2001 return true;
2003 return false;
2006 /* Output overall time estimate. */
2007 static void
2008 dump_overall_stats (void)
2010 int64_t sum_weighted = 0, sum = 0;
2011 struct cgraph_node *node;
2013 FOR_EACH_DEFINED_FUNCTION (node)
2014 if (!node->global.inlined_to
2015 && !node->alias)
2017 int time = inline_summary (node)->time;
2018 sum += time;
2019 sum_weighted += time * node->count;
2021 fprintf (dump_file, "Overall time estimate: "
2022 "%"PRId64" weighted by profile: "
2023 "%"PRId64"\n", sum, sum_weighted);
2026 /* Output some useful stats about inlining. */
2028 static void
2029 dump_inline_stats (void)
2031 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2032 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2033 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2034 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2035 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2036 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2037 int64_t reason[CIF_N_REASONS][3];
2038 int i;
2039 struct cgraph_node *node;
2041 memset (reason, 0, sizeof (reason));
2042 FOR_EACH_DEFINED_FUNCTION (node)
2044 struct cgraph_edge *e;
2045 for (e = node->callees; e; e = e->next_callee)
2047 if (e->inline_failed)
2049 reason[(int) e->inline_failed][0] += e->count;
2050 reason[(int) e->inline_failed][1] += e->frequency;
2051 reason[(int) e->inline_failed][2] ++;
2052 if (DECL_VIRTUAL_P (e->callee->decl))
2054 if (e->indirect_inlining_edge)
2055 noninlined_virt_indir_cnt += e->count;
2056 else
2057 noninlined_virt_cnt += e->count;
2059 else
2061 if (e->indirect_inlining_edge)
2062 noninlined_indir_cnt += e->count;
2063 else
2064 noninlined_cnt += e->count;
2067 else
2069 if (e->speculative)
2071 if (DECL_VIRTUAL_P (e->callee->decl))
2072 inlined_speculative_ply += e->count;
2073 else
2074 inlined_speculative += e->count;
2076 else if (DECL_VIRTUAL_P (e->callee->decl))
2078 if (e->indirect_inlining_edge)
2079 inlined_virt_indir_cnt += e->count;
2080 else
2081 inlined_virt_cnt += e->count;
2083 else
2085 if (e->indirect_inlining_edge)
2086 inlined_indir_cnt += e->count;
2087 else
2088 inlined_cnt += e->count;
2092 for (e = node->indirect_calls; e; e = e->next_callee)
2093 if (e->indirect_info->polymorphic)
2094 indirect_poly_cnt += e->count;
2095 else
2096 indirect_cnt += e->count;
2098 if (max_count)
2100 fprintf (dump_file,
2101 "Inlined %"PRId64 " + speculative "
2102 "%"PRId64 " + speculative polymorphic "
2103 "%"PRId64 " + previously indirect "
2104 "%"PRId64 " + virtual "
2105 "%"PRId64 " + virtual and previously indirect "
2106 "%"PRId64 "\n" "Not inlined "
2107 "%"PRId64 " + previously indirect "
2108 "%"PRId64 " + virtual "
2109 "%"PRId64 " + virtual and previously indirect "
2110 "%"PRId64 " + stil indirect "
2111 "%"PRId64 " + still indirect polymorphic "
2112 "%"PRId64 "\n", inlined_cnt,
2113 inlined_speculative, inlined_speculative_ply,
2114 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2115 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2116 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2117 fprintf (dump_file,
2118 "Removed speculations %"PRId64 "\n",
2119 spec_rem);
2121 dump_overall_stats ();
2122 fprintf (dump_file, "\nWhy inlining failed?\n");
2123 for (i = 0; i < CIF_N_REASONS; i++)
2124 if (reason[i][2])
2125 fprintf (dump_file, "%-50s: %8i calls, %8i freq, %"PRId64" count\n",
2126 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2127 (int) reason[i][2], (int) reason[i][1], reason[i][0]);
2130 /* Decide on the inlining. We do so in topological order to avoid
2131 the expense of updating data structures. */
2133 static unsigned int
2134 ipa_inline (void)
2136 struct cgraph_node *node;
2137 int nnodes;
2138 struct cgraph_node **order;
2139 int i;
2140 int cold;
2141 bool remove_functions = false;
2143 if (!optimize)
2144 return 0;
2146 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2148 if (in_lto_p && optimize)
2149 ipa_update_after_lto_read ();
2151 if (dump_file)
2152 dump_inline_summaries (dump_file);
2154 nnodes = ipa_reverse_postorder (order);
2156 FOR_EACH_FUNCTION (node)
2157 node->aux = 0;
2159 if (dump_file)
2160 fprintf (dump_file, "\nFlattening functions:\n");
2162 /* In the first pass handle functions to be flattened. Do this first
2163 so that none of our later choices makes flattening impossible. */
2164 for (i = nnodes - 1; i >= 0; i--)
2166 node = order[i];
2168 /* Handle nodes to be flattened.
2169 Ideally when processing callees we stop inlining at the
2170 entry of cycles, possibly cloning that entry point and
2171 trying to flatten it, turning it into a self-recursive
2172 function. */
2173 if (lookup_attribute ("flatten",
2174 DECL_ATTRIBUTES (node->decl)) != NULL)
2176 if (dump_file)
2177 fprintf (dump_file,
2178 "Flattening %s\n", node->name ());
2179 flatten_function (node, false);
2182 if (dump_file)
2183 dump_overall_stats ();
2185 inline_small_functions ();
2187 /* Do the first after-inlining removal. We want to remove all "stale" extern inline
2188 functions and virtual functions so we really know what is called once. */
2189 symtab->remove_unreachable_nodes (false, dump_file);
2190 free (order);
2192 /* Inline functions with the property that after inlining into all callers the
2193 code size will shrink, because the out-of-line copy is eliminated.
2194 We do this regardless of the callee size as long as function growth limits
2195 are met. */
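/* A minimal illustration (hypothetical code): a static function whose only
   uses are direct calls can be inlined into every caller and its
   out-of-line body discarded, shrinking overall code size even if the body
   itself is not tiny:

     static int helper (int x) { return x * x + 3; }
     int f (int a) { return helper (a); }
     int g (int b) { return helper (b) + 1; }

   After inlining helper into both f and g, the standalone copy of helper
   becomes unreachable and is removed, provided the growth limits above
   are met.  */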
2196 if (dump_file)
2197 fprintf (dump_file,
2198 "\nDeciding on functions to be inlined into all callers and removing useless speculations:\n");
2200 /* Inlining one function called once has a good chance of preventing
2201 inlining of another function into the same callee. Ideally we should
2202 work in priority order, but inlining hot functions first is probably
2203 a good cut without the extra pain of maintaining the queue.
2205 ??? This does not really fit the bill perfectly: inlining a function
2206 into a callee often leads to better optimization of the callee due to
2207 the increased context for optimization.
2208 For example, if main() calls a function that outputs help
2209 and then a function that does the main optimization, we should inline
2210 the second with priority even if both calls are cold by themselves.
2212 We probably want to implement a new predicate replacing our use of
2213 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2214 to be hot. */
2215 for (cold = 0; cold <= 1; cold ++)
2217 FOR_EACH_DEFINED_FUNCTION (node)
2219 struct cgraph_edge *edge, *next;
2220 bool update=false;
2222 for (edge = node->callees; edge; edge = next)
2224 next = edge->next_callee;
2225 if (edge->speculative && !speculation_useful_p (edge, false))
2227 edge->resolve_speculation ();
2228 spec_rem += edge->count;
2229 update = true;
2230 remove_functions = true;
2233 if (update)
2235 struct cgraph_node *where = node->global.inlined_to
2236 ? node->global.inlined_to : node;
2237 reset_node_growth_cache (where);
2238 reset_edge_caches (where);
2239 inline_update_overall_summary (where);
2241 if (flag_inline_functions_called_once
2242 && want_inline_function_to_all_callers_p (node, cold))
2244 int num_calls = 0;
2245 node->call_for_symbol_thunks_and_aliases (sum_callers, &num_calls,
2246 true);
2247 while (node->call_for_symbol_thunks_and_aliases (inline_to_all_callers,
2248 &num_calls, true))
2250 remove_functions = true;
2255 /* Free ipa-prop structures if they are no longer needed. */
2256 if (optimize)
2257 ipa_free_all_structures_after_iinln ();
2259 if (dump_file)
2261 fprintf (dump_file,
2262 "\nInlined %i calls, eliminated %i functions\n\n",
2263 ncalls_inlined, nfunctions_inlined);
2264 dump_inline_stats ();
2267 if (dump_file)
2268 dump_inline_summaries (dump_file);
2269 /* In WPA we use inline summaries for partitioning process. */
2270 if (!flag_wpa)
2271 inline_free_summary ();
2272 return remove_functions ? TODO_remove_functions : 0;
2275 /* Inline always-inline function calls in NODE. */
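/* A user-level illustration (hypothetical example): calls to functions
   declared with the always_inline attribute are inlined here even when not
   optimizing, ignoring the usual size limits; a failure to inline them is
   diagnosed later from expand_call_inline in tree-inline.c:

     static inline int __attribute__ ((always_inline))
     clamp (int v, int lo, int hi)
     {
       return v < lo ? lo : v > hi ? hi : v;
     }

     int saturate (int v) { return clamp (v, 0, 255); }
 */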
2277 static bool
2278 inline_always_inline_functions (struct cgraph_node *node)
2280 struct cgraph_edge *e;
2281 bool inlined = false;
2283 for (e = node->callees; e; e = e->next_callee)
2285 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2286 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2287 continue;
2289 if (e->recursive_p ())
2291 if (dump_file)
2292 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2293 e->callee->name ());
2294 e->inline_failed = CIF_RECURSIVE_INLINING;
2295 continue;
2298 if (!can_early_inline_edge_p (e))
2300 /* Set inlined to true if the callee is marked "always_inline" but
2301 is not inlinable. This will allow flagging an error later in
2302 expand_call_inline in tree-inline.c. */
2303 if (lookup_attribute ("always_inline",
2304 DECL_ATTRIBUTES (callee->decl)) != NULL)
2305 inlined = true;
2306 continue;
2309 if (dump_file)
2310 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2311 xstrdup (e->callee->name ()),
2312 xstrdup (e->caller->name ()));
2313 inline_call (e, true, NULL, NULL, false);
2314 inlined = true;
2316 if (inlined)
2317 inline_update_overall_summary (node);
2319 return inlined;
2322 /* Decide on the inlining. We do so in topological order to avoid
2323 the expense of updating data structures. */
2325 static bool
2326 early_inline_small_functions (struct cgraph_node *node)
2328 struct cgraph_edge *e;
2329 bool inlined = false;
2331 for (e = node->callees; e; e = e->next_callee)
2333 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2334 if (!inline_summary (callee)->inlinable
2335 || !e->inline_failed)
2336 continue;
2338 /* Do not consider functions not declared inline. */
2339 if (!DECL_DECLARED_INLINE_P (callee->decl)
2340 && !flag_inline_small_functions
2341 && !flag_inline_functions)
2342 continue;
2344 if (dump_file)
2345 fprintf (dump_file, "Considering inline candidate %s.\n",
2346 callee->name ());
2348 if (!can_early_inline_edge_p (e))
2349 continue;
2351 if (e->recursive_p ())
2353 if (dump_file)
2354 fprintf (dump_file, " Not inlining: recursive call.\n");
2355 continue;
2358 if (!want_early_inline_function_p (e))
2359 continue;
2361 if (dump_file)
2362 fprintf (dump_file, " Inlining %s into %s.\n",
2363 xstrdup (callee->name ()),
2364 xstrdup (e->caller->name ()));
2365 inline_call (e, true, NULL, NULL, true);
2366 inlined = true;
2369 return inlined;
2372 unsigned int
2373 early_inliner (function *fun)
2375 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2376 struct cgraph_edge *edge;
2377 unsigned int todo = 0;
2378 int iterations = 0;
2379 bool inlined = false;
2381 if (seen_error ())
2382 return 0;
2384 /* Do nothing if data structures for the ipa-inliner are already computed.
2385 This happens when some pass decides to construct a new function and
2386 cgraph_add_new_function calls lowering passes and early optimization on
2387 it. This may confuse us when the early inliner decides to inline a call
2388 to a function clone, because function clones don't have a parameter list
2389 in ipa-prop matching their signature. */
2390 if (ipa_node_params_vector.exists ())
2391 return 0;
2393 #ifdef ENABLE_CHECKING
2394 node->verify ();
2395 #endif
2396 node->remove_all_references ();
2398 /* Even when not optimizing or not inlining, inline always-inline
2399 functions. */
2400 inlined = inline_always_inline_functions (node);
2402 if (!optimize
2403 || flag_no_inline
2404 || !flag_early_inlining
2405 /* Never inline regular functions into always-inline functions
2406 during incremental inlining. This is unfortunate, as functions calling
2407 always-inline functions will get less optimized, but at the
2408 same time inlining functions that call an always-inline
2409 function into an always-inline function might introduce
2410 cycles of edges to be always inlined in the callgraph.
2412 We might want to be smarter and just avoid this type of inlining. */
2413 || DECL_DISREGARD_INLINE_LIMITS (node->decl))
2415 else if (lookup_attribute ("flatten",
2416 DECL_ATTRIBUTES (node->decl)) != NULL)
2418 /* When the function is marked to be flattened, recursively inline
2419 all calls in it. */
2420 if (dump_file)
2421 fprintf (dump_file,
2422 "Flattening %s\n", node->name ());
2423 flatten_function (node, true);
2424 inlined = true;
2426 else
2428 /* We iterate incremental inlining to get trivial cases of indirect
2429 inlining. */
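/* An illustrative (hypothetical) case of the indirect inlining these
   iterations catch: once apply is inlined into caller, the call through
   the function pointer becomes a direct call to square, which a later
   iteration can then inline as well:

     static int square (int x) { return x * x; }
     static int apply (int (*fn) (int), int v) { return fn (v); }
     int caller (int v) { return apply (square, v); }
 */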
2430 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2431 && early_inline_small_functions (node))
2433 timevar_push (TV_INTEGRATION);
2434 todo |= optimize_inline_calls (current_function_decl);
2436 /* Technically we ought to recompute inline parameters so the new
2437 iteration of the early inliner works as expected. However, the values
2438 are approximately right, and thus we only need to update the edge
2439 info that might be cleared out for newly discovered edges. */
2440 for (edge = node->callees; edge; edge = edge->next_callee)
2442 struct inline_edge_summary *es = inline_edge_summary (edge);
2443 es->call_stmt_size
2444 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2445 es->call_stmt_time
2446 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2447 if (edge->callee->decl
2448 && !gimple_check_call_matching_types (
2449 edge->call_stmt, edge->callee->decl, false))
2450 edge->call_stmt_cannot_inline_p = true;
2452 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2453 inline_update_overall_summary (node);
2454 timevar_pop (TV_INTEGRATION);
2455 iterations++;
2456 inlined = false;
2458 if (dump_file)
2459 fprintf (dump_file, "Iterations: %i\n", iterations);
2462 if (inlined)
2464 timevar_push (TV_INTEGRATION);
2465 todo |= optimize_inline_calls (current_function_decl);
2466 timevar_pop (TV_INTEGRATION);
2469 fun->always_inline_functions_inlined = true;
2471 return todo;
2474 /* Do inlining of small functions. Doing so early helps profiling and other
2475 passes to be somewhat more effective and avoids some code duplication in
2476 the later real inlining pass for testcases with very many function calls. */
2478 namespace {
2480 const pass_data pass_data_early_inline =
2482 GIMPLE_PASS, /* type */
2483 "einline", /* name */
2484 OPTGROUP_INLINE, /* optinfo_flags */
2485 TV_EARLY_INLINING, /* tv_id */
2486 PROP_ssa, /* properties_required */
2487 0, /* properties_provided */
2488 0, /* properties_destroyed */
2489 0, /* todo_flags_start */
2490 0, /* todo_flags_finish */
2493 class pass_early_inline : public gimple_opt_pass
2495 public:
2496 pass_early_inline (gcc::context *ctxt)
2497 : gimple_opt_pass (pass_data_early_inline, ctxt)
2500 /* opt_pass methods: */
2501 virtual unsigned int execute (function *);
2503 }; // class pass_early_inline
2505 unsigned int
2506 pass_early_inline::execute (function *fun)
2508 return early_inliner (fun);
2511 } // anon namespace
2513 gimple_opt_pass *
2514 make_pass_early_inline (gcc::context *ctxt)
2516 return new pass_early_inline (ctxt);
2519 namespace {
2521 const pass_data pass_data_ipa_inline =
2523 IPA_PASS, /* type */
2524 "inline", /* name */
2525 OPTGROUP_INLINE, /* optinfo_flags */
2526 TV_IPA_INLINING, /* tv_id */
2527 0, /* properties_required */
2528 0, /* properties_provided */
2529 0, /* properties_destroyed */
2530 0, /* todo_flags_start */
2531 ( TODO_dump_symtab ), /* todo_flags_finish */
2534 class pass_ipa_inline : public ipa_opt_pass_d
2536 public:
2537 pass_ipa_inline (gcc::context *ctxt)
2538 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2539 inline_generate_summary, /* generate_summary */
2540 inline_write_summary, /* write_summary */
2541 inline_read_summary, /* read_summary */
2542 NULL, /* write_optimization_summary */
2543 NULL, /* read_optimization_summary */
2544 NULL, /* stmt_fixup */
2545 0, /* function_transform_todo_flags_start */
2546 inline_transform, /* function_transform */
2547 NULL) /* variable_transform */
2550 /* opt_pass methods: */
2551 virtual unsigned int execute (function *) { return ipa_inline (); }
2553 }; // class pass_ipa_inline
2555 } // anon namespace
2557 ipa_opt_pass_d *
2558 make_pass_ipa_inline (gcc::context *ctxt)
2560 return new pass_ipa_inline (ctxt);