1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2014 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Inlining decision heuristics
 23    The implementation of the inliner is organized as follows:
 25    inlining heuristics limits
 27      can_inline_edge_p allows checking that a particular inlining is allowed
 28      by the limits specified by the user (allowed function growth, stack usage
 29      growth and so on).
31 Functions are inlined when it is obvious the result is profitable (such
 32    as functions called once or when inlining reduces code size).
33 In addition to that we perform inlining of small functions and recursive
34 inlining.
36 inlining heuristics
38 The inliner itself is split into two passes:
40 pass_early_inlining
 42      A simple local inlining pass that inlines callees into the current function.
 43      This pass makes no use of whole unit analysis and thus it can make only
 44      very simple decisions based on local properties.
46 The strength of the pass is that it is run in topological order
47 (reverse postorder) on the callgraph. Functions are converted into SSA
48 form just before this pass and optimized subsequently. As a result, the
 49      callees of the function seen by the early inliner were already optimized,
 50      and the results of early inlining add a lot of optimization opportunities
 51      for local optimization.
 53      The pass handles the obvious inlining decisions within the compilation
 54      unit - inlining auto inline functions, inlining for size and
 55      flattening.
 57      The main strength of the pass is the ability to eliminate abstraction
 58      penalty in C++ code (via a combination of inlining and early
 59      optimization) and thus improve the quality of analysis done by the real
 60      IPA optimizers.
 62      Because of the lack of whole unit knowledge, the pass cannot really make
 63      good code size/performance tradeoffs.  It however does very simple
 64      speculative inlining, allowing code size to grow by
 65      EARLY_INLINING_INSNS when the callee is a leaf function.  In this case the
66 optimizations performed later are very likely to eliminate the cost.
68 pass_ipa_inline
 70      This is the real inliner, able to handle inlining with whole program
 71      knowledge.  It performs the following steps:
 73      1) inlining of small functions.  This is implemented by a greedy
 74      algorithm ordering all inlinable cgraph edges by their badness and
 75      inlining them in this order as long as the inline limits allow doing so.
 77      This heuristic is not very good at inlining recursive calls.  Recursive
 78      calls can be inlined with results similar to loop unrolling.  To do so,
 79      a special purpose recursive inliner is executed on the function when a
 80      recursive edge is met as a viable candidate.
 82      2) Unreachable functions are removed from the callgraph.  Inlining leads
 83      to devirtualization and other modifications of the callgraph, so functions
84 may become unreachable during the process. Also functions declared as
85 extern inline or virtual functions are removed, since after inlining
86 we no longer need the offline bodies.
88 3) Functions called once and not exported from the unit are inlined.
 89      This should almost always lead to a reduction of code size by eliminating
 90      the need for an offline copy of the function.  */
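/* A minimal sketch of step 1 above, assuming badness can be treated as a
   plain integer key; the real loop (inline_small_functions below) also
   resolves speculation, special-cases recursive edges and keeps the growth
   caches consistent:

	edge_heap_t heap (LONG_MIN);
	for (each edge e where can_inline_edge_p (e, true)
	     && want_inline_small_function_p (e, true))
	  heap.insert (edge_badness (e, false), e);
	while (!heap.empty ()
	       && overall_size < compute_max_insns (min_size))
	  inline_call (heap.extract_min (), true, NULL, &overall_size, true);
*/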
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "tm.h"
96 #include "tree.h"
97 #include "trans-mem.h"
98 #include "calls.h"
99 #include "tree-inline.h"
100 #include "langhooks.h"
101 #include "flags.h"
102 #include "diagnostic.h"
103 #include "gimple-pretty-print.h"
104 #include "params.h"
105 #include "intl.h"
106 #include "tree-pass.h"
107 #include "coverage.h"
108 #include "rtl.h"
109 #include "bitmap.h"
110 #include "profile.h"
111 #include "predict.h"
112 #include "vec.h"
113 #include "hashtab.h"
114 #include "hash-set.h"
115 #include "machmode.h"
116 #include "hard-reg-set.h"
117 #include "input.h"
118 #include "function.h"
119 #include "basic-block.h"
120 #include "tree-ssa-alias.h"
121 #include "internal-fn.h"
122 #include "gimple-expr.h"
123 #include "is-a.h"
124 #include "gimple.h"
125 #include "gimple-ssa.h"
126 #include "hash-map.h"
127 #include "plugin-api.h"
128 #include "ipa-ref.h"
129 #include "cgraph.h"
130 #include "alloc-pool.h"
131 #include "ipa-prop.h"
132 #include "except.h"
133 #include "target.h"
134 #include "ipa-inline.h"
135 #include "ipa-utils.h"
136 #include "sreal.h"
137 #include "auto-profile.h"
138 #include "cilk.h"
139 #include "builtins.h"
140 #include "fibonacci_heap.h"
142 typedef fibonacci_heap <long, cgraph_edge> edge_heap_t;
143 typedef fibonacci_node <long, cgraph_edge> edge_heap_node_t;
145 /* Statistics we collect about inlining algorithm. */
146 static int overall_size;
147 static gcov_type max_count;
148 static sreal max_count_real, max_relbenefit_real, half_int_min_real;
149 static gcov_type spec_rem;
151 /* Return false when inlining edge E would lead to violating
152 limits on function unit growth or stack usage growth.
154 The relative function body growth limit is present generally
155 to avoid problems with non-linear behavior of the compiler.
 156    To allow inlining huge functions into a tiny wrapper, the limit
157 is always based on the bigger of the two functions considered.
 159    For stack growth limits we always base the growth on the stack usage
 160    of the callers.  We want to prevent applications from segfaulting
 161    on stack overflow when functions with huge stack frames get
 162    inlined. */
164 static bool
165 caller_growth_limits (struct cgraph_edge *e)
167 struct cgraph_node *to = e->caller;
168 struct cgraph_node *what = e->callee->ultimate_alias_target ();
169 int newsize;
170 int limit = 0;
171 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
172 struct inline_summary *info, *what_info, *outer_info = inline_summary (to);
 174   /* Look for the function e->caller is inlined into.  While doing
 175      so, work out the largest function body on the way.  As
 176      described above, we want to base our function growth
 177      limits on that: not on the self size of the
 178      outer function, nor on the self size of inline code
 179      we immediately inline into.  This is the most relaxed
 180      interpretation of the rule "do not grow large functions
 181      too much in order to prevent the compiler from exploding".  */
182 while (true)
184 info = inline_summary (to);
185 if (limit < info->self_size)
186 limit = info->self_size;
187 if (stack_size_limit < info->estimated_self_stack_size)
188 stack_size_limit = info->estimated_self_stack_size;
189 if (to->global.inlined_to)
190 to = to->callers->caller;
191 else
192 break;
195 what_info = inline_summary (what);
197 if (limit < what_info->self_size)
198 limit = what_info->self_size;
200 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
202 /* Check the size after inlining against the function limits. But allow
203 the function to shrink if it went over the limits by forced inlining. */
204 newsize = estimate_size_after_inlining (to, e);
205 if (newsize >= info->size
206 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
207 && newsize > limit)
209 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
210 return false;
213 if (!what_info->estimated_stack_size)
214 return true;
 216   /* FIXME: Stack size limit often prevents inlining in Fortran programs
 217      due to large I/O data structures used by the Fortran front-end.
 218      We ought to ignore this limit when we know that the edge is executed
 219      on every invocation of the caller (i.e. its call statement dominates
 220      the exit block).  We do not track this information yet.  */
221 stack_size_limit += ((gcov_type)stack_size_limit
222 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
224 inlined_stack = (outer_info->stack_frame_offset
225 + outer_info->estimated_self_stack_size
226 + what_info->estimated_stack_size);
227 /* Check new stack consumption with stack consumption at the place
228 stack is used. */
229 if (inlined_stack > stack_size_limit
 230       /* If the function already has large stack usage from a sibling
 231          inline call, we can inline, too.
 232          This bit overoptimistically assumes that we are good at stack
 233          packing.  */
234 && inlined_stack > info->estimated_stack_size
235 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
237 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
238 return false;
240 return true;
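/* A worked example, assuming the documented default of 100 for
   --param large-function-growth: if the largest self_size seen on the path
   from the caller up to its inlined_to root (or in the callee) is 500, then
   limit becomes 500 + 500 * 100 / 100 = 1000, and the inlining is refused
   only once the estimated size after inlining exceeds both this limit and
   --param large-function-insns.  */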
243 /* Dump info about why inlining has failed. */
245 static void
246 report_inline_failed_reason (struct cgraph_edge *e)
248 if (dump_file)
250 fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
251 xstrdup (e->caller->name ()), e->caller->order,
252 xstrdup (e->callee->name ()), e->callee->order,
253 cgraph_inline_failed_string (e->inline_failed));
257 /* Decide whether sanitizer-related attributes allow inlining. */
259 static bool
260 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
262 /* Don't care if sanitizer is disabled */
263 if (!(flag_sanitize & SANITIZE_ADDRESS))
264 return true;
266 if (!caller || !callee)
267 return true;
269 return !!lookup_attribute ("no_sanitize_address",
270 DECL_ATTRIBUTES (caller)) ==
271 !!lookup_attribute ("no_sanitize_address",
272 DECL_ATTRIBUTES (callee));
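/* Example of the rule above: with -fsanitize=address, a callee marked
   __attribute__((no_sanitize_address)) may only be inlined into callers
   that carry the same attribute (and vice versa); if exactly one side has
   it, the comparison fails and can_inline_edge_p below reports
   CIF_ATTRIBUTE_MISMATCH.  */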
275 /* Decide if we can inline the edge and possibly update
276 inline_failed reason.
277 We check whether inlining is possible at all and whether
278 caller growth limits allow doing so.
 280    If REPORT is true, output the reason to the dump file.
 282    If DISREGARD_LIMITS is true, ignore size limits.  */
284 static bool
285 can_inline_edge_p (struct cgraph_edge *e, bool report,
286 bool disregard_limits = false)
288 bool inlinable = true;
289 enum availability avail;
290 cgraph_node *callee = e->callee->ultimate_alias_target (&avail);
291 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
292 tree callee_tree
293 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
294 struct function *caller_fun = e->caller->get_fun ();
295 struct function *callee_fun = callee ? callee->get_fun () : NULL;
297 gcc_assert (e->inline_failed);
299 if (!callee || !callee->definition)
301 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
302 inlinable = false;
304 else if (callee->calls_comdat_local)
306 e->inline_failed = CIF_USES_COMDAT_LOCAL;
307 inlinable = false;
309 else if (!inline_summary (callee)->inlinable
310 || (caller_fun && fn_contains_cilk_spawn_p (caller_fun)))
312 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
313 inlinable = false;
315 else if (avail <= AVAIL_INTERPOSABLE)
317 e->inline_failed = CIF_OVERWRITABLE;
318 inlinable = false;
320 else if (e->call_stmt_cannot_inline_p)
322 if (e->inline_failed != CIF_FUNCTION_NOT_OPTIMIZED)
323 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
324 inlinable = false;
326 /* Don't inline if the functions have different EH personalities. */
327 else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
328 && DECL_FUNCTION_PERSONALITY (callee->decl)
329 && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
330 != DECL_FUNCTION_PERSONALITY (callee->decl)))
332 e->inline_failed = CIF_EH_PERSONALITY;
333 inlinable = false;
335 /* TM pure functions should not be inlined into non-TM_pure
336 functions. */
337 else if (is_tm_pure (callee->decl)
338 && !is_tm_pure (e->caller->decl))
340 e->inline_failed = CIF_UNSPECIFIED;
341 inlinable = false;
343 /* Don't inline if the callee can throw non-call exceptions but the
344 caller cannot.
345 FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
346 Move the flag into cgraph node or mirror it in the inline summary. */
347 else if (callee_fun && callee_fun->can_throw_non_call_exceptions
348 && !(caller_fun && caller_fun->can_throw_non_call_exceptions))
350 e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
351 inlinable = false;
353 /* Check compatibility of target optimization options. */
354 else if (!targetm.target_option.can_inline_p (e->caller->decl,
355 callee->decl))
357 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
358 inlinable = false;
360 /* Don't inline a function with mismatched sanitization attributes. */
361 else if (!sanitize_attrs_match_for_inline_p (e->caller->decl, callee->decl))
363 e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
364 inlinable = false;
366 /* Check if caller growth allows the inlining. */
367 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
368 && !disregard_limits
369 && !lookup_attribute ("flatten",
370 DECL_ATTRIBUTES
371 (e->caller->global.inlined_to
372 ? e->caller->global.inlined_to->decl
373 : e->caller->decl))
374 && !caller_growth_limits (e))
375 inlinable = false;
376 /* Don't inline a function with a higher optimization level than the
 377      caller.  FIXME: this is really just the tip of the iceberg of handling
 378      the optimization attribute.  */
379 else if (caller_tree != callee_tree)
381 if (((opt_for_fn (e->caller->decl, optimize)
382 > opt_for_fn (e->callee->decl, optimize))
383 || (opt_for_fn (e->caller->decl, optimize_size)
384 != opt_for_fn (e->callee->decl, optimize_size)))
385 /* gcc.dg/pr43564.c. Look at forced inline even in -O0. */
386 && !DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
388 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
389 inlinable = false;
393 if (!inlinable && report)
394 report_inline_failed_reason (e);
395 return inlinable;
399 /* Return true if the edge E is inlinable during early inlining. */
401 static bool
402 can_early_inline_edge_p (struct cgraph_edge *e)
404 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
 405   /* The early inliner might get called at WPA stage when an IPA pass adds a
 406      new function.  In this case we cannot really do any early inlining
 407      because function bodies are missing.  */
408 if (!gimple_has_body_p (callee->decl))
410 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
411 return false;
 413   /* In the early inliner some callees may not be in SSA form yet
 414      (i.e. the callgraph is cyclic and the early inliner did not process
 415      the callee yet).  We don't have a CIF code for this
 416      case; later we will re-do the decision in the real inliner.  */
417 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
418 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
420 if (dump_file)
421 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
422 return false;
424 if (!can_inline_edge_p (e, true))
425 return false;
426 return true;
430 /* Return number of calls in N. Ignore cheap builtins. */
432 static int
433 num_calls (struct cgraph_node *n)
435 struct cgraph_edge *e;
436 int num = 0;
438 for (e = n->callees; e; e = e->next_callee)
439 if (!is_inexpensive_builtin (e->callee->decl))
440 num++;
441 return num;
 445 /* Return true if we are interested in inlining the small function during early inlining.  */
447 static bool
448 want_early_inline_function_p (struct cgraph_edge *e)
450 bool want_inline = true;
451 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
453 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
 455     /* For AutoFDO, we need to make sure that before profile annotation, all
 456        hot paths' IR look exactly the same as in the profiled binary.  As a result,
 457        in the early inliner, we will disregard the size limit and inline those callsites
 458        that are:
459 * inlined in the profiled binary, and
460 * the cloned callee has enough samples to be considered "hot". */
461 else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
463 else if (!DECL_DECLARED_INLINE_P (callee->decl)
464 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
466 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
467 report_inline_failed_reason (e);
468 want_inline = false;
470 else
472 int growth = estimate_edge_growth (e);
473 int n;
475 if (growth <= 0)
477 else if (!e->maybe_hot_p ()
478 && growth > 0)
480 if (dump_file)
481 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
482 "call is cold and code would grow by %i\n",
483 xstrdup (e->caller->name ()),
484 e->caller->order,
485 xstrdup (callee->name ()), callee->order,
486 growth);
487 want_inline = false;
489 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
491 if (dump_file)
492 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
493 "growth %i exceeds --param early-inlining-insns\n",
494 xstrdup (e->caller->name ()),
495 e->caller->order,
496 xstrdup (callee->name ()), callee->order,
497 growth);
498 want_inline = false;
500 else if ((n = num_calls (callee)) != 0
501 && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
503 if (dump_file)
504 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
505 "growth %i exceeds --param early-inlining-insns "
506 "divided by number of calls\n",
507 xstrdup (e->caller->name ()),
508 e->caller->order,
509 xstrdup (callee->name ()), callee->order,
510 growth);
511 want_inline = false;
514 return want_inline;
517 /* Compute time of the edge->caller + edge->callee execution when inlining
518 does not happen. */
520 inline gcov_type
521 compute_uninlined_call_time (struct inline_summary *callee_info,
522 struct cgraph_edge *edge)
524 gcov_type uninlined_call_time =
525 RDIV ((gcov_type)callee_info->time * MAX (edge->frequency, 1),
526 CGRAPH_FREQ_BASE);
527 gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
528 ? edge->caller->global.inlined_to
529 : edge->caller)->time;
530 return uninlined_call_time + caller_time;
 533 /* Same as compute_uninlined_call_time but compute the time when inlining
 534    does happen.  */
536 inline gcov_type
537 compute_inlined_call_time (struct cgraph_edge *edge,
538 int edge_time)
540 gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
541 ? edge->caller->global.inlined_to
542 : edge->caller)->time;
543 gcov_type time = (caller_time
544 + RDIV (((gcov_type) edge_time
545 - inline_edge_summary (edge)->call_stmt_time)
546 * MAX (edge->frequency, 1), CGRAPH_FREQ_BASE));
 547   /* Possibly one roundoff error, but watch for overflows.  */
548 gcc_checking_assert (time >= INT_MIN / 2);
549 if (time < 0)
550 time = 0;
551 return time;
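/* A worked example of the two helpers, with made-up numbers: caller time
   200, callee time 100, edge->frequency == CGRAPH_FREQ_BASE (the call runs
   once per caller invocation), estimated edge_time 60 and call_stmt_time 10.
   Then compute_uninlined_call_time returns 100 + 200 = 300 while
   compute_inlined_call_time returns 200 + (60 - 10) = 250, i.e. inlining is
   estimated to save 50 time units per invocation of the caller.  */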
 554 /* Return true if the speedup for inlining E is bigger than
 555    PARAM_INLINE_MIN_SPEEDUP.  */
557 static bool
558 big_speedup_p (struct cgraph_edge *e)
 560   gcov_type time = compute_uninlined_call_time (inline_summary (e->callee),
 561                                                 e);
562 gcov_type inlined_time = compute_inlined_call_time (e,
563 estimate_edge_time (e));
564 if (time - inlined_time
565 > RDIV (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP), 100))
566 return true;
567 return false;
 570 /* Return true if we are interested in inlining the small function.
 571    When REPORT is true, report the reason to the dump file.  */
573 static bool
574 want_inline_small_function_p (struct cgraph_edge *e, bool report)
576 bool want_inline = true;
577 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
579 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
581 else if (!DECL_DECLARED_INLINE_P (callee->decl)
582 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
584 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
585 want_inline = false;
 587   /* Do a fast and conservative check whether the function can be a good
 588      inline candidate.  At the moment we allow inline hints to
 589      promote non-inline functions to inline and we increase
 590      MAX_INLINE_INSNS_SINGLE 16-fold for inline functions.  */
591 else if ((!DECL_DECLARED_INLINE_P (callee->decl)
592 && (!e->count || !e->maybe_hot_p ()))
593 && inline_summary (callee)->min_size
594 - inline_edge_summary (e)->call_stmt_size
595 > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
597 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
598 want_inline = false;
600 else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
601 && inline_summary (callee)->min_size
602 - inline_edge_summary (e)->call_stmt_size
603 > 16 * MAX_INLINE_INSNS_SINGLE)
605 e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
606 ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
607 : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
608 want_inline = false;
610 else
612 int growth = estimate_edge_growth (e);
613 inline_hints hints = estimate_edge_hints (e);
614 bool big_speedup = big_speedup_p (e);
616 if (growth <= 0)
 618       /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
 619          hints suggest that inlining the given function is very profitable.  */
620 else if (DECL_DECLARED_INLINE_P (callee->decl)
621 && growth >= MAX_INLINE_INSNS_SINGLE
622 && ((!big_speedup
623 && !(hints & (INLINE_HINT_indirect_call
624 | INLINE_HINT_known_hot
625 | INLINE_HINT_loop_iterations
626 | INLINE_HINT_array_index
627 | INLINE_HINT_loop_stride)))
628 || growth >= MAX_INLINE_INSNS_SINGLE * 16))
630 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
631 want_inline = false;
633 else if (!DECL_DECLARED_INLINE_P (callee->decl)
634 && !opt_for_fn (e->caller->decl, flag_inline_functions))
636 /* growth_likely_positive is expensive, always test it last. */
637 if (growth >= MAX_INLINE_INSNS_SINGLE
638 || growth_likely_positive (callee, growth))
640 e->inline_failed = CIF_NOT_DECLARED_INLINED;
641 want_inline = false;
 644       /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
 645          Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
 646          inlining the given function is very profitable.  */
647 else if (!DECL_DECLARED_INLINE_P (callee->decl)
648 && !big_speedup
649 && !(hints & INLINE_HINT_known_hot)
650 && growth >= ((hints & (INLINE_HINT_indirect_call
651 | INLINE_HINT_loop_iterations
652 | INLINE_HINT_array_index
653 | INLINE_HINT_loop_stride))
654 ? MAX (MAX_INLINE_INSNS_AUTO,
655 MAX_INLINE_INSNS_SINGLE)
656 : MAX_INLINE_INSNS_AUTO))
658 /* growth_likely_positive is expensive, always test it last. */
659 if (growth >= MAX_INLINE_INSNS_SINGLE
660 || growth_likely_positive (callee, growth))
662 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
663 want_inline = false;
666 /* If call is cold, do not inline when function body would grow. */
667 else if (!e->maybe_hot_p ()
668 && (growth >= MAX_INLINE_INSNS_SINGLE
669 || growth_likely_positive (callee, growth)))
671 e->inline_failed = CIF_UNLIKELY_CALL;
672 want_inline = false;
675 if (!want_inline && report)
676 report_inline_failed_reason (e);
677 return want_inline;
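/* A worked example of the limits above, using illustrative parameter values
   (the real defaults are release dependent): take MAX_INLINE_INSNS_AUTO as
   40 and MAX_INLINE_INSNS_SINGLE as 400.  A callee not declared inline with
   estimated growth 60, no hints and no big speedup fails the auto limit
   (once growth_likely_positive confirms the growth), while the same callee
   with an INLINE_HINT_loop_iterations hint is judged against the larger
   MAX_INLINE_INSNS_SINGLE bound of 400 instead.  */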
 680 /* EDGE is a self recursive edge.
 681    We handle two cases - when function A is inlined into itself
 682    or when function A is being inlined into another inlined copy of function
 683    A within function B.
 685    In the first case OUTER_NODE points to the toplevel copy of A, while
 686    in the second case OUTER_NODE points to the outermost copy of A in B.
 688    In both cases we want to be extra selective since
 689    inlining the call will just cause new recursive calls to appear.  */
691 static bool
692 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
693 struct cgraph_node *outer_node,
694 bool peeling,
695 int depth)
697 char const *reason = NULL;
698 bool want_inline = true;
699 int caller_freq = CGRAPH_FREQ_BASE;
700 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
702 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
703 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
705 if (!edge->maybe_hot_p ())
707 reason = "recursive call is cold";
708 want_inline = false;
710 else if (max_count && !outer_node->count)
712 reason = "not executed in profile";
713 want_inline = false;
715 else if (depth > max_depth)
717 reason = "--param max-inline-recursive-depth exceeded.";
718 want_inline = false;
721 if (outer_node->global.inlined_to)
722 caller_freq = outer_node->callers->frequency;
724 if (!caller_freq)
726 reason = "function is inlined and unlikely";
727 want_inline = false;
730 if (!want_inline)
 732   /* Inlining of a self recursive function into a copy of itself within another
 733      function is a transformation similar to loop peeling.
 735      Peeling is profitable if we can inline enough copies to make the probability
 736      of the actual call to the self recursive function very small.  Be sure that
 737      the probability of recursion is small.
 739      We ensure that the frequency of recursing is at most 1 - (1/max_depth).
 740      This way the expected number of recursions is at most max_depth.  */
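  /* A worked example, taking CGRAPH_FREQ_BASE as 1000 and max_depth as 8:
     max_prob = 1000 - (1000 + 7) / 8 = 875, i.e. the recursion probability
     must stay below 87.5% for the first peeled copy; at depth 2 the
     threshold tightens to 875 * 875 / 1000 = 765, and so on, keeping the
     expected recursion depth bounded by roughly max_depth.  */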
741 else if (peeling)
743 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
744 / max_depth);
745 int i;
746 for (i = 1; i < depth; i++)
747 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
748 if (max_count
749 && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
750 >= max_prob))
752 reason = "profile of recursive call is too large";
753 want_inline = false;
755 if (!max_count
756 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
757 >= max_prob))
759 reason = "frequency of recursive call is too large";
760 want_inline = false;
 763   /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the
 764      recursion depth is large.  We reduce function call overhead and increase the
 765      chance that things fit in the hardware return predictor.
 767      Recursive inlining might however increase the cost of stack frame setup,
 768      actually slowing down functions whose recursion tree is wide rather than
 769      deep.
 771      Deciding reliably on when to do recursive inlining without profile feedback
 772      is tricky.  For now we disable recursive inlining when the probability of self
 773      recursion is low.
 775      Recursive inlining of a self recursive call within a loop also results in large loop
 776      depths that generally optimize badly.  We may want to throttle down inlining
 777      in those cases.  In particular this seems to happen in one of the libstdc++ rb tree
 778      methods.  */
779 else
781 if (max_count
782 && (edge->count * 100 / outer_node->count
783 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
785 reason = "profile of recursive call is too small";
786 want_inline = false;
788 else if (!max_count
789 && (edge->frequency * 100 / caller_freq
790 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
792 reason = "frequency of recursive call is too small";
793 want_inline = false;
796 if (!want_inline && dump_file)
797 fprintf (dump_file, " not inlining recursively: %s\n", reason);
798 return want_inline;
801 /* Return true when NODE has uninlinable caller;
802 set HAS_HOT_CALL if it has hot call.
803 Worker for cgraph_for_node_and_aliases. */
805 static bool
806 check_callers (struct cgraph_node *node, void *has_hot_call)
808 struct cgraph_edge *e;
809 for (e = node->callers; e; e = e->next_caller)
811 if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
812 return true;
813 if (!can_inline_edge_p (e, true))
814 return true;
815 if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
816 *(bool *)has_hot_call = true;
818 return false;
821 /* If NODE has a caller, return true. */
823 static bool
824 has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
826 if (node->callers)
827 return true;
828 return false;
831 /* Decide if inlining NODE would reduce unit size by eliminating
832 the offline copy of function.
833 When COLD is true the cold calls are considered, too. */
835 static bool
836 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
838 bool has_hot_call = false;
840 if (node->ultimate_alias_target () != node)
841 return false;
842 /* Already inlined? */
843 if (node->global.inlined_to)
844 return false;
845 /* Does it have callers? */
846 if (!node->call_for_symbol_thunks_and_aliases (has_caller_p, NULL, true))
847 return false;
848 /* Inlining into all callers would increase size? */
849 if (estimate_growth (node) > 0)
850 return false;
851 /* All inlines must be possible. */
852 if (node->call_for_symbol_thunks_and_aliases (check_callers, &has_hot_call,
853 true))
854 return false;
855 if (!cold && !has_hot_call)
856 return false;
857 return true;
860 #define RELATIVE_TIME_BENEFIT_RANGE (INT_MAX / 64)
862 /* Return relative time improvement for inlining EDGE in range
863 1...RELATIVE_TIME_BENEFIT_RANGE */
865 static inline int
866 relative_time_benefit (struct inline_summary *callee_info,
867 struct cgraph_edge *edge,
868 int edge_time)
870 gcov_type relbenefit;
871 gcov_type uninlined_call_time = compute_uninlined_call_time (callee_info, edge);
872 gcov_type inlined_call_time = compute_inlined_call_time (edge, edge_time);
874 /* Inlining into extern inline function is not a win. */
875 if (DECL_EXTERNAL (edge->caller->global.inlined_to
876 ? edge->caller->global.inlined_to->decl
877 : edge->caller->decl))
878 return 1;
880 /* Watch overflows. */
881 gcc_checking_assert (uninlined_call_time >= 0);
882 gcc_checking_assert (inlined_call_time >= 0);
883 gcc_checking_assert (uninlined_call_time >= inlined_call_time);
885 /* Compute relative time benefit, i.e. how much the call becomes faster.
 886      ??? Perhaps computing how much the caller+callee together become faster
 887      would lead to more realistic results.  */
888 if (!uninlined_call_time)
889 uninlined_call_time = 1;
890 relbenefit =
891 RDIV (((gcov_type)uninlined_call_time - inlined_call_time) * RELATIVE_TIME_BENEFIT_RANGE,
892 uninlined_call_time);
893 relbenefit = MIN (relbenefit, RELATIVE_TIME_BENEFIT_RANGE);
894 gcc_checking_assert (relbenefit >= 0);
895 relbenefit = MAX (relbenefit, 1);
896 return relbenefit;
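/* A worked example, reusing the numbers from the compute_*_call_time
   comment above: uninlined_call_time = 300 and inlined_call_time = 250 give
   relbenefit = (300 - 250) * RELATIVE_TIME_BENEFIT_RANGE / 300, roughly one
   sixth of the range, while a call whose cost disappears entirely after
   inlining saturates at RELATIVE_TIME_BENEFIT_RANGE.  */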
 900 /* A cost model driving the inlining heuristics in a way such that the edges with
 901    the smallest badness are inlined first.  After each inlining is performed
 902    the costs of all caller edges of the nodes affected are recomputed so the
 903    metrics may accurately depend on values such as the number of inlinable callers
 904    of the function or the function body size.  */
906 static int
907 edge_badness (struct cgraph_edge *edge, bool dump)
909 gcov_type badness;
910 int growth, edge_time;
911 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
912 struct inline_summary *callee_info = inline_summary (callee);
913 inline_hints hints;
915 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
916 return INT_MIN;
918 growth = estimate_edge_growth (edge);
919 edge_time = estimate_edge_time (edge);
920 hints = estimate_edge_hints (edge);
921 gcc_checking_assert (edge_time >= 0);
922 gcc_checking_assert (edge_time <= callee_info->time);
923 gcc_checking_assert (growth <= callee_info->size);
925 if (dump)
927 fprintf (dump_file, " Badness calculation for %s/%i -> %s/%i\n",
928 xstrdup (edge->caller->name ()),
929 edge->caller->order,
930 xstrdup (callee->name ()),
931 edge->callee->order);
932 fprintf (dump_file, " size growth %i, time %i ",
933 growth,
934 edge_time);
935 dump_inline_hints (dump_file, hints);
936 if (big_speedup_p (edge))
937 fprintf (dump_file, " big_speedup");
938 fprintf (dump_file, "\n");
941 /* Always prefer inlining saving code size. */
942 if (growth <= 0)
944 badness = INT_MIN / 2 + growth;
945 if (dump)
946 fprintf (dump_file, " %i: Growth %i <= 0\n", (int) badness,
947 growth);
950 /* When profiling is available, compute badness as:
952 relative_edge_count * relative_time_benefit
953 goodness = -------------------------------------------
954 growth_f_caller
955 badness = -goodness
 957      The fraction is upside down, because for edge counts and time benefits
 958      the bounds are known.  Edge growth is essentially unlimited.  */
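  /* As a rough worked example: the hottest edge (edge->count == max_count)
     with the maximal relative time benefit and a growth of 1 makes the
     sreal expression below evaluate to about INT_MAX / 2, so badness comes
     out near INT_MIN / 2, the best value this branch can produce; colder
     edges, smaller benefits or larger growth scale the magnitude down
     proportionally.  */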
960 else if (max_count)
962 int relbenefit = relative_time_benefit (callee_info, edge, edge_time);
963 /* Capping edge->count to max_count. edge->count can be larger than
964 max_count if an inline adds new edges which increase max_count
965 after max_count is computed. */
966 gcov_type edge_count = edge->count > max_count ? max_count : edge->count;
968 sreal relbenefit_real (relbenefit, 0);
969 sreal growth_real (growth, 0);
971 /* relative_edge_count. */
972 sreal tmp (edge_count, 0);
973 tmp /= max_count_real;
975 /* relative_time_benefit. */
976 tmp *= relbenefit_real;
977 tmp /= max_relbenefit_real;
979 /* growth_f_caller. */
980 tmp *= half_int_min_real;
981 tmp /= growth_real;
983 badness = -1 * tmp.to_int ();
985 if (dump)
987 fprintf (dump_file,
988 " %i (relative %f): profile info. Relative count %f%s"
989 " * Relative benefit %f\n",
990 (int) badness, (double) badness / INT_MIN,
991 (double) edge_count / max_count,
992 edge->count > max_count ? " (capped to max_count)" : "",
993 relbenefit * 100.0 / RELATIVE_TIME_BENEFIT_RANGE);
 997   /* When the function local profile is available, compute badness as:
 999                  relative_time_benefit
 1000     goodness =  ---------------------------------
 1001                 growth_of_caller * overall_growth
 1003     badness = - goodness
 1005     compensated by the inline hints.
 1006   */
 1007   /* TODO: We ought to support mixing units where some functions are profiled
 1008      and some not.  */
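  /* As a rough worked example: a relative benefit of half the range, a
     growth of 10 and an overall callee growth of 20 give badness of about
     INT_MIN / 32 / 200; hints such as INLINE_HINT_loop_iterations then
     multiply the (negative) value by 8, making the edge considerably more
     attractive to the greedy loop.  */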
1009 else if (flag_guess_branch_prob)
1011 badness = (relative_time_benefit (callee_info, edge, edge_time)
1012 * (INT_MIN / 16 / RELATIVE_TIME_BENEFIT_RANGE));
1013 badness /= (MIN (65536/2, growth) * MIN (65536/2, MAX (1, callee_info->growth)));
1014 gcc_checking_assert (badness <=0 && badness >= INT_MIN / 16);
1015 if ((hints & (INLINE_HINT_indirect_call
1016 | INLINE_HINT_loop_iterations
1017 | INLINE_HINT_array_index
1018 | INLINE_HINT_loop_stride))
1019 || callee_info->growth <= 0)
1020 badness *= 8;
1021 if (hints & (INLINE_HINT_same_scc))
1022 badness /= 16;
1023 else if (hints & (INLINE_HINT_in_scc))
1024 badness /= 8;
1025 else if (hints & (INLINE_HINT_cross_module))
1026 badness /= 2;
1027 gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 2);
1028 if ((hints & INLINE_HINT_declared_inline) && badness >= INT_MIN / 32)
1029 badness *= 16;
1030 if (dump)
1032 fprintf (dump_file,
1033 " %i: guessed profile. frequency %f,"
1034 " benefit %f%%, time w/o inlining %i, time w inlining %i"
1035 " overall growth %i (current) %i (original)\n",
1036 (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE,
1037 relative_time_benefit (callee_info, edge, edge_time) * 100.0
1038 / RELATIVE_TIME_BENEFIT_RANGE,
1039 (int)compute_uninlined_call_time (callee_info, edge),
1040 (int)compute_inlined_call_time (edge, edge_time),
1041 estimate_growth (callee),
1042 callee_info->growth);
 1045   /* When the function local profile is not available or it does not give
 1046      useful information (i.e. the frequency is zero), base the cost on the
 1047      loop nest and overall size growth, so we optimize for the overall number
 1048      of functions fully inlined in the program.  */
1049 else
1051 int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
1052 badness = growth * 256;
1054 /* Decrease badness if call is nested. */
1055 if (badness > 0)
1056 badness >>= nest;
1057 else
1059 badness <<= nest;
1061 if (dump)
1062 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
1063 nest);
1066 /* Ensure that we did not overflow in all the fixed point math above. */
1067 gcc_assert (badness >= INT_MIN);
1068 gcc_assert (badness <= INT_MAX - 1);
1069 /* Make recursive inlining happen always after other inlining is done. */
1070 if (edge->recursive_p ())
1071 return badness + 1;
1072 else
1073 return badness;
1076 /* Recompute badness of EDGE and update its key in HEAP if needed. */
1077 static inline void
1078 update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
1080 int badness = edge_badness (edge, false);
1081 if (edge->aux)
1083 edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
1084 gcc_checking_assert (n->get_data () == edge);
 1086       /* fibonacci_heap::replace_key only decreases the keys.
 1087          When we increase the key we do not update the heap
 1088          and instead re-insert the element once it becomes
 1089          the minimum of the heap.  */
1090 if (badness < n->get_key ())
1092 if (dump_file && (dump_flags & TDF_DETAILS))
1094 fprintf (dump_file,
1095 " decreasing badness %s/%i -> %s/%i, %i to %i\n",
1096 xstrdup (edge->caller->name ()),
1097 edge->caller->order,
1098 xstrdup (edge->callee->name ()),
1099 edge->callee->order,
1100 (int)n->get_key (),
1101 badness);
1103 heap->decrease_key (n, badness);
1104 gcc_checking_assert (n->get_key () == badness);
1107 else
1109 if (dump_file && (dump_flags & TDF_DETAILS))
1111 fprintf (dump_file,
1112 " enqueuing call %s/%i -> %s/%i, badness %i\n",
1113 xstrdup (edge->caller->name ()),
1114 edge->caller->order,
1115 xstrdup (edge->callee->name ()),
1116 edge->callee->order,
1117 badness);
1119 edge->aux = heap->insert (badness, edge);
 1124 /* NODE was inlined.
 1125    All caller edges need to be reset because
 1126    size estimates change.  Similarly callees need resetting
 1127    because a better context may be known.  */
1129 static void
1130 reset_edge_caches (struct cgraph_node *node)
1132 struct cgraph_edge *edge;
1133 struct cgraph_edge *e = node->callees;
1134 struct cgraph_node *where = node;
1135 struct ipa_ref *ref;
1137 if (where->global.inlined_to)
1138 where = where->global.inlined_to;
1140 /* WHERE body size has changed, the cached growth is invalid. */
1141 reset_node_growth_cache (where);
1143 for (edge = where->callers; edge; edge = edge->next_caller)
1144 if (edge->inline_failed)
1145 reset_edge_growth_cache (edge);
1147 FOR_EACH_ALIAS (where, ref)
1148 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
1150 if (!e)
1151 return;
1153 while (true)
1154 if (!e->inline_failed && e->callee->callees)
1155 e = e->callee->callees;
1156 else
1158 if (e->inline_failed)
1159 reset_edge_growth_cache (e);
1160 if (e->next_callee)
1161 e = e->next_callee;
1162 else
1166 if (e->caller == node)
1167 return;
1168 e = e->caller->callers;
1170 while (!e->next_callee);
1171 e = e->next_callee;
 1176 /* Recompute HEAP nodes for each caller of NODE.
 1177    UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
 1178    When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
 1179    it is inlinable.  Otherwise check all edges.  */
1181 static void
1182 update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
1183 bitmap updated_nodes,
1184 struct cgraph_edge *check_inlinablity_for)
1186 struct cgraph_edge *edge;
1187 struct ipa_ref *ref;
1189 if ((!node->alias && !inline_summary (node)->inlinable)
1190 || node->global.inlined_to)
1191 return;
1192 if (!bitmap_set_bit (updated_nodes, node->uid))
1193 return;
1195 FOR_EACH_ALIAS (node, ref)
1197 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1198 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1201 for (edge = node->callers; edge; edge = edge->next_caller)
1202 if (edge->inline_failed)
1204 if (!check_inlinablity_for
1205 || check_inlinablity_for == edge)
1207 if (can_inline_edge_p (edge, false)
1208 && want_inline_small_function_p (edge, false))
1209 update_edge_key (heap, edge);
1210 else if (edge->aux)
1212 report_inline_failed_reason (edge);
1213 heap->delete_node ((edge_heap_node_t *) edge->aux);
1214 edge->aux = NULL;
1217 else if (edge->aux)
1218 update_edge_key (heap, edge);
1222 /* Recompute HEAP nodes for each uninlined call in NODE.
 1223    This is used when we know that edge badnesses are only going to increase
 1224    (we introduced a new call site) and thus all we need is to insert newly
 1225    created edges into the heap.  */
1227 static void
1228 update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
1229 bitmap updated_nodes)
1231 struct cgraph_edge *e = node->callees;
1233 if (!e)
1234 return;
1235 while (true)
1236 if (!e->inline_failed && e->callee->callees)
1237 e = e->callee->callees;
1238 else
1240 enum availability avail;
1241 struct cgraph_node *callee;
 1242         /* We do not reset the callee growth cache here.  Since we added a new call,
 1243            growth can only have increased and consequently the badness metric
 1244            doesn't need updating.  */
1245 if (e->inline_failed
1246 && (callee = e->callee->ultimate_alias_target (&avail))
1247 && inline_summary (callee)->inlinable
1248 && avail >= AVAIL_AVAILABLE
1249 && !bitmap_bit_p (updated_nodes, callee->uid))
1251 if (can_inline_edge_p (e, false)
1252 && want_inline_small_function_p (e, false))
1253 update_edge_key (heap, e);
1254 else if (e->aux)
1256 report_inline_failed_reason (e);
1257 heap->delete_node ((edge_heap_node_t *) e->aux);
1258 e->aux = NULL;
1261 if (e->next_callee)
1262 e = e->next_callee;
1263 else
1267 if (e->caller == node)
1268 return;
1269 e = e->caller->callers;
1271 while (!e->next_callee);
1272 e = e->next_callee;
 1277 /* Enqueue all recursive calls from NODE into the priority queue, prioritized
 1278    by how much we want to recursively inline the call.  */
1280 static void
1281 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1282 edge_heap_t *heap)
1284 struct cgraph_edge *e;
1285 enum availability avail;
1287 for (e = where->callees; e; e = e->next_callee)
1288 if (e->callee == node
1289 || (e->callee->ultimate_alias_target (&avail) == node
1290 && avail > AVAIL_INTERPOSABLE))
1292 /* When profile feedback is available, prioritize by expected number
1293 of calls. */
1294 heap->insert (!max_count ? -e->frequency
 1295                     : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
 1296                     e);
1298 for (e = where->callees; e; e = e->next_callee)
1299 if (!e->inline_failed)
1300 lookup_recursive_calls (node, e->callee, heap);
 1303 /* Decide on recursive inlining: in the case the function has recursive calls,
 1304    inline until the body size reaches the given limit.  If any new indirect edges
 1305    are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
 1306    is NULL.  */
1308 static bool
1309 recursive_inlining (struct cgraph_edge *edge,
1310 vec<cgraph_edge *> *new_edges)
1312 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1313 edge_heap_t heap (LONG_MIN);
1314 struct cgraph_node *node;
1315 struct cgraph_edge *e;
1316 struct cgraph_node *master_clone = NULL, *next;
1317 int depth = 0;
1318 int n = 0;
1320 node = edge->caller;
1321 if (node->global.inlined_to)
1322 node = node->global.inlined_to;
1324 if (DECL_DECLARED_INLINE_P (node->decl))
1325 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1327 /* Make sure that function is small enough to be considered for inlining. */
1328 if (estimate_size_after_inlining (node, edge) >= limit)
1329 return false;
1330 lookup_recursive_calls (node, node, &heap);
1331 if (heap.empty ())
1332 return false;
1334 if (dump_file)
1335 fprintf (dump_file,
1336 " Performing recursive inlining on %s\n",
1337 node->name ());
 1339   /* Do the inlining and update the list of recursive calls during the process.  */
1340 while (!heap.empty ())
1342 struct cgraph_edge *curr = heap.extract_min ();
1343 struct cgraph_node *cnode, *dest = curr->callee;
1345 if (!can_inline_edge_p (curr, true))
1346 continue;
 1348       /* MASTER_CLONE is produced in the case we have already started modifying
 1349          the function.  Be sure to redirect the edge to the original body before
 1350          estimating growths, otherwise we will be seeing growths after inlining
 1351          the already modified body.  */
1352 if (master_clone)
1354 curr->redirect_callee (master_clone);
1355 reset_edge_growth_cache (curr);
1358 if (estimate_size_after_inlining (node, curr) > limit)
1360 curr->redirect_callee (dest);
1361 reset_edge_growth_cache (curr);
1362 break;
1365 depth = 1;
1366 for (cnode = curr->caller;
1367 cnode->global.inlined_to; cnode = cnode->callers->caller)
1368 if (node->decl
1369 == curr->callee->ultimate_alias_target ()->decl)
1370 depth++;
1372 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1374 curr->redirect_callee (dest);
1375 reset_edge_growth_cache (curr);
1376 continue;
1379 if (dump_file)
1381 fprintf (dump_file,
1382 " Inlining call of depth %i", depth);
1383 if (node->count)
1385 fprintf (dump_file, " called approx. %.2f times per call",
1386 (double)curr->count / node->count);
1388 fprintf (dump_file, "\n");
1390 if (!master_clone)
1392 /* We need original clone to copy around. */
1393 master_clone = node->create_clone (node->decl, node->count,
1394 CGRAPH_FREQ_BASE, false, vNULL,
1395 true, NULL, NULL);
1396 for (e = master_clone->callees; e; e = e->next_callee)
1397 if (!e->inline_failed)
1398 clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
1399 curr->redirect_callee (master_clone);
1400 reset_edge_growth_cache (curr);
1403 inline_call (curr, false, new_edges, &overall_size, true);
1404 lookup_recursive_calls (node, curr->callee, &heap);
1405 n++;
1408 if (!heap.empty () && dump_file)
1409 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1411 if (!master_clone)
1412 return false;
1414 if (dump_file)
1415 fprintf (dump_file,
1416 "\n Inlined %i times, "
1417 "body grown from size %i to %i, time %i to %i\n", n,
1418 inline_summary (master_clone)->size, inline_summary (node)->size,
1419 inline_summary (master_clone)->time, inline_summary (node)->time);
 1421   /* Remove the master clone we used for inlining.  We rely on the fact that clones
 1422      inlined into the master clone get queued just before the master clone so we don't
 1423      need recursion.  */
1424 for (node = symtab->first_function (); node != master_clone;
1425 node = next)
1427 next = symtab->next_function (node);
1428 if (node->global.inlined_to == master_clone)
1429 node->remove ();
1431 master_clone->remove ();
1432 return true;
1436 /* Given whole compilation unit estimate of INSNS, compute how large we can
1437 allow the unit to grow. */
1439 static int
1440 compute_max_insns (int insns)
1442 int max_insns = insns;
1443 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1444 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1446 return ((int64_t) max_insns
1447 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
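/* A worked example, assuming the historical defaults of 10000 for
   --param large-unit-insns and 30 for --param inline-unit-growth: a unit
   estimated at 4000 instructions is first raised to the 10000 floor, so
   small function inlining may grow it to 10000 * 130 / 100 = 13000
   instructions, while a 40000-instruction unit may grow to 52000.  */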
1451 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1453 static void
1454 add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
1456 while (new_edges.length () > 0)
1458 struct cgraph_edge *edge = new_edges.pop ();
1460 gcc_assert (!edge->aux);
1461 if (edge->inline_failed
1462 && can_inline_edge_p (edge, true)
1463 && want_inline_small_function_p (edge, true))
1464 edge->aux = heap->insert (edge_badness (edge, false), edge);
1468 /* Remove EDGE from the fibheap. */
1470 static void
1471 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1473 if (e->callee)
1474 reset_node_growth_cache (e->callee);
1475 if (e->aux)
1477 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
1478 e->aux = NULL;
1482 /* Return true if speculation of edge E seems useful.
1483 If ANTICIPATE_INLINING is true, be conservative and hope that E
1484 may get inlined. */
1486 bool
1487 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1489 enum availability avail;
1490 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail);
1491 struct cgraph_edge *direct, *indirect;
1492 struct ipa_ref *ref;
1494 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1496 if (!e->maybe_hot_p ())
1497 return false;
1499 /* See if IP optimizations found something potentially useful about the
1500 function. For now we look only for CONST/PURE flags. Almost everything
1501 else we propagate is useless. */
1502 if (avail >= AVAIL_AVAILABLE)
1504 int ecf_flags = flags_from_decl_or_type (target->decl);
1505 if (ecf_flags & ECF_CONST)
1507 e->speculative_call_info (direct, indirect, ref);
1508 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1509 return true;
1511 else if (ecf_flags & ECF_PURE)
1513 e->speculative_call_info (direct, indirect, ref);
1514 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1515 return true;
 1518   /* If we did not manage to inline the function nor redirect
 1519      to an ipa-cp clone (these are seen by having the local flag set),
 1520      it is probably pointless to inline it unless the hardware is missing
 1521      an indirect call predictor.  */
1522 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1523 return false;
1524 /* For overwritable targets there is not much to do. */
1525 if (e->inline_failed && !can_inline_edge_p (e, false, true))
1526 return false;
1527 /* OK, speculation seems interesting. */
1528 return true;
1531 /* We know that EDGE is not going to be inlined.
1532 See if we can remove speculation. */
1534 static void
1535 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1537 if (edge->speculative && !speculation_useful_p (edge, false))
1539 struct cgraph_node *node = edge->caller;
1540 struct cgraph_node *where = node->global.inlined_to
1541 ? node->global.inlined_to : node;
1542 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1544 spec_rem += edge->count;
1545 edge->resolve_speculation ();
1546 reset_edge_caches (where);
1547 inline_update_overall_summary (where);
1548 update_caller_keys (edge_heap, where,
1549 updated_nodes, NULL);
1550 update_callee_keys (edge_heap, where,
1551 updated_nodes);
1552 BITMAP_FREE (updated_nodes);
 1556 /* We use a greedy algorithm for inlining small functions:
 1557    all inline candidates are put into a prioritized heap ordered by
 1558    increasing badness.
1560 The inlining of small functions is bounded by unit growth parameters. */
1562 static void
1563 inline_small_functions (void)
1565 struct cgraph_node *node;
1566 struct cgraph_edge *edge;
1567 edge_heap_t edge_heap (LONG_MIN);
1568 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1569 int min_size, max_size;
1570 auto_vec<cgraph_edge *> new_indirect_edges;
1571 int initial_size = 0;
1572 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1573 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1574 new_indirect_edges.create (8);
1576 edge_removal_hook_holder
1577 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1579 /* Compute overall unit size and other global parameters used by badness
1580 metrics. */
1582 max_count = 0;
1583 ipa_reduced_postorder (order, true, true, NULL);
1584 free (order);
1586 FOR_EACH_DEFINED_FUNCTION (node)
1587 if (!node->global.inlined_to)
1589 if (node->has_gimple_body_p ()
1590 || node->thunk.thunk_p)
1592 struct inline_summary *info = inline_summary (node);
1593 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1595 /* Do not account external functions, they will be optimized out
1596 if not inlined. Also only count the non-cold portion of program. */
1597 if (!DECL_EXTERNAL (node->decl)
1598 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED)
1599 initial_size += info->size;
1600 info->growth = estimate_growth (node);
1601 if (dfs && dfs->next_cycle)
1603 struct cgraph_node *n2;
1604 int id = dfs->scc_no + 1;
1605 for (n2 = node; n2;
1606 n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle)
1608 struct inline_summary *info2 = inline_summary (n2);
1609 if (info2->scc_no)
1610 break;
1611 info2->scc_no = id;
1616 for (edge = node->callers; edge; edge = edge->next_caller)
1617 if (max_count < edge->count)
1618 max_count = edge->count;
1620 max_count_real = sreal (max_count, 0);
1621 max_relbenefit_real = sreal (RELATIVE_TIME_BENEFIT_RANGE, 0);
1622 half_int_min_real = sreal (INT_MAX / 2, 0);
1623 ipa_free_postorder_info ();
1624 initialize_growth_caches ();
1626 if (dump_file)
1627 fprintf (dump_file,
1628 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1629 initial_size);
1631 overall_size = initial_size;
1632 max_size = compute_max_insns (overall_size);
1633 min_size = overall_size;
1635 /* Populate the heap with all edges we might inline. */
1637 FOR_EACH_DEFINED_FUNCTION (node)
1639 bool update = false;
1640 struct cgraph_edge *next;
1642 if (dump_file)
1643 fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
1644 node->name (), node->order);
1646 for (edge = node->callees; edge; edge = next)
1648 next = edge->next_callee;
1649 if (edge->inline_failed
1650 && !edge->aux
1651 && can_inline_edge_p (edge, true)
1652 && want_inline_small_function_p (edge, true)
1653 && edge->inline_failed)
1655 gcc_assert (!edge->aux);
1656 update_edge_key (&edge_heap, edge);
1658 if (edge->speculative && !speculation_useful_p (edge, edge->aux != NULL))
1660 edge->resolve_speculation ();
1661 update = true;
1664 if (update)
1666 struct cgraph_node *where = node->global.inlined_to
1667 ? node->global.inlined_to : node;
1668 inline_update_overall_summary (where);
1669 reset_node_growth_cache (where);
1670 reset_edge_caches (where);
1671 update_caller_keys (&edge_heap, where,
1672 updated_nodes, NULL);
1673 bitmap_clear (updated_nodes);
1677 gcc_assert (in_lto_p
1678 || !max_count
1679 || (profile_info && flag_branch_probabilities));
1681 while (!edge_heap.empty ())
1683 int old_size = overall_size;
1684 struct cgraph_node *where, *callee;
1685 int badness = edge_heap.min_key ();
1686 int current_badness;
1687 int cached_badness;
1688 int growth;
1690 edge = edge_heap.extract_min ();
1691 gcc_assert (edge->aux);
1692 edge->aux = NULL;
1693 if (!edge->inline_failed || !edge->callee->analyzed)
1694 continue;
 1696       /* Be sure that caches are kept consistent.
 1697          We cannot make this ENABLE_CHECKING only because it causes different
 1698          updates of the fibheap queue.  */
1699 cached_badness = edge_badness (edge, false);
1700 reset_edge_growth_cache (edge);
1701 reset_node_growth_cache (edge->callee);
 1703       /* When updating the edge costs, we only decrease badness in the keys.
 1704          Increases of badness are handled lazily; when we see a key with an out
 1705          of date value on it, we re-insert it now.  */
1706 current_badness = edge_badness (edge, false);
1707 gcc_assert (cached_badness == current_badness);
1708 gcc_assert (current_badness >= badness);
1709 if (current_badness != badness)
1711 edge->aux = edge_heap.insert (current_badness, edge);
1712 continue;
1715 if (!can_inline_edge_p (edge, true))
1717 resolve_noninline_speculation (&edge_heap, edge);
1718 continue;
1721 callee = edge->callee->ultimate_alias_target ();
1722 growth = estimate_edge_growth (edge);
1723 if (dump_file)
1725 fprintf (dump_file,
1726 "\nConsidering %s/%i with %i size\n",
1727 callee->name (), callee->order,
1728 inline_summary (callee)->size);
1729 fprintf (dump_file,
1730 " to be inlined into %s/%i in %s:%i\n"
1731 " Estimated badness is %i, frequency %.2f.\n",
1732 edge->caller->name (), edge->caller->order,
1733 flag_wpa ? "unknown"
1734 : gimple_filename ((const_gimple) edge->call_stmt),
1735 flag_wpa ? -1
1736 : gimple_lineno ((const_gimple) edge->call_stmt),
1737 badness,
1738 edge->frequency / (double)CGRAPH_FREQ_BASE);
1739 if (edge->count)
1740 fprintf (dump_file," Called %"PRId64"x\n",
1741 edge->count);
1742 if (dump_flags & TDF_DETAILS)
1743 edge_badness (edge, true);
1746 if (overall_size + growth > max_size
1747 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1749 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1750 report_inline_failed_reason (edge);
1751 resolve_noninline_speculation (&edge_heap, edge);
1752 continue;
1755 if (!want_inline_small_function_p (edge, true))
1757 resolve_noninline_speculation (&edge_heap, edge);
1758 continue;
 1761       /* Heuristics for inlining small functions work poorly for
 1762          recursive calls where we do effects similar to loop unrolling.
 1763          When inlining such an edge seems profitable, leave the decision to the
 1764          special purpose recursive inliner.  */
1765 if (edge->recursive_p ())
1767 where = edge->caller;
1768 if (where->global.inlined_to)
1769 where = where->global.inlined_to;
1770 if (!recursive_inlining (edge,
1771 opt_for_fn (edge->caller->decl,
1772 flag_indirect_inlining)
1773 ? &new_indirect_edges : NULL))
1775 edge->inline_failed = CIF_RECURSIVE_INLINING;
1776 resolve_noninline_speculation (&edge_heap, edge);
1777 continue;
1779 reset_edge_caches (where);
1780 /* Recursive inliner inlines all recursive calls of the function
1781 at once. Consequently we need to update all callee keys. */
1782 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
1783 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
1784 update_callee_keys (&edge_heap, where, updated_nodes);
1785 bitmap_clear (updated_nodes);
1787 else
1789 struct cgraph_node *outer_node = NULL;
1790 int depth = 0;
 1792           /* Consider the case where the self recursive function A is inlined
 1793              into B.  This is a desired optimization in some cases, since it
 1794              leads to an effect similar to loop peeling and we might completely
 1795              optimize out the recursive call.  However we must be extra
 1796              selective.  */
1798 where = edge->caller;
1799 while (where->global.inlined_to)
1801 if (where->decl == callee->decl)
1802 outer_node = where, depth++;
1803 where = where->callers->caller;
1805 if (outer_node
1806 && !want_inline_self_recursive_call_p (edge, outer_node,
1807 true, depth))
1809 edge->inline_failed
1810 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
1811 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1812 resolve_noninline_speculation (&edge_heap, edge);
1813 continue;
1815 else if (depth && dump_file)
1816 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
1818 gcc_checking_assert (!callee->global.inlined_to);
1819 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
1820 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
1822 reset_edge_caches (edge->callee);
1823 reset_node_growth_cache (callee);
1825 update_callee_keys (&edge_heap, where, updated_nodes);
1827 where = edge->caller;
1828 if (where->global.inlined_to)
1829 where = where->global.inlined_to;
1831 /* Our profitability metric can depend on local properties
1832 such as the number of inlinable calls and the size of the function body.
1833 After inlining these properties might change for the function we
1834 inlined into (since its body size changed) and for the functions
1835 called by the function we inlined (since the number of their inlinable
1836 callers might change). */
1837 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
1838 bitmap_clear (updated_nodes);
1840 if (dump_file)
1842 fprintf (dump_file,
1843 " Inlined into %s which now has time %i and size %i, "
1844 "net change of %+i.\n",
1845 edge->caller->name (),
1846 inline_summary (edge->caller)->time,
1847 inline_summary (edge->caller)->size,
1848 overall_size - old_size);
1850 if (min_size > overall_size)
1852 min_size = overall_size;
1853 max_size = compute_max_insns (min_size);
1855 if (dump_file)
1856 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1860 free_growth_caches ();
1861 if (dump_file)
1862 fprintf (dump_file,
1863 "Unit growth for small function inlining: %i->%i (%i%%)\n",
1864 initial_size, overall_size,
1865 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
1866 BITMAP_FREE (updated_nodes);
1867 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
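/* Worked example of the dump arithmetic above (illustrative numbers only):
   with initial_size == 1000 and overall_size == 1150 the pass reports
   "Unit growth for small function inlining: 1000->1150 (15%)", since
   1150 * 100 / 1000 - 100 == 15.  max_size itself is recomputed from
   min_size by compute_max_insns whenever a new minimal size is reached,
   so the growth cap tracks the smallest unit size seen so far.  */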
1870 /* Flatten NODE. Performed both during early inlining and
1871 at IPA inlining time. */
1873 static void
1874 flatten_function (struct cgraph_node *node, bool early)
1876 struct cgraph_edge *e;
1878 /* We shouldn't be called recursively when we are being processed. */
1879 gcc_assert (node->aux == NULL);
1881 node->aux = (void *) node;
1883 for (e = node->callees; e; e = e->next_callee)
1885 struct cgraph_node *orig_callee;
1886 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
1888 /* Have we hit a cycle? Then it is time to give up. */
1889 if (callee->aux)
1891 if (dump_file)
1892 fprintf (dump_file,
1893 "Not inlining %s into %s to avoid cycle.\n",
1894 xstrdup (callee->name ()),
1895 xstrdup (e->caller->name ()));
1896 e->inline_failed = CIF_RECURSIVE_INLINING;
1897 continue;
1900 /* When the edge is already inlined, we just need to recurse into
1901 it in order to fully flatten the leaves. */
1902 if (!e->inline_failed)
1904 flatten_function (callee, early);
1905 continue;
1908 /* The flatten attribute needs to be processed during late inlining. For
1909 extra code quality, however, we also do flattening during early
1910 optimization. */
1911 if (!early
1912 ? !can_inline_edge_p (e, true)
1913 : !can_early_inline_edge_p (e))
1914 continue;
1916 if (e->recursive_p ())
1918 if (dump_file)
1919 fprintf (dump_file, "Not inlining: recursive call.\n");
1920 continue;
1923 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1924 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
1926 if (dump_file)
1927 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1928 continue;
1931 /* Inline the edge and flatten the inline clone. Avoid
1932 recursing through the original node if the node was cloned. */
1933 if (dump_file)
1934 fprintf (dump_file, " Inlining %s into %s.\n",
1935 xstrdup (callee->name ()),
1936 xstrdup (e->caller->name ()));
1937 orig_callee = callee;
1938 inline_call (e, true, NULL, NULL, false);
1939 if (e->callee != orig_callee)
1940 orig_callee->aux = (void *) node;
1941 flatten_function (e->callee, early);
1942 if (e->callee != orig_callee)
1943 orig_callee->aux = NULL;
1946 node->aux = NULL;
1947 if (!node->global.inlined_to)
1948 inline_update_overall_summary (node);
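/* For illustration (hypothetical user code, not part of this file): the
   user-level attribute this function implements looks like

     __attribute__ ((flatten))
     void render (void)
     {
       draw_background ();	// inlined here, together with
       draw_sprites ();		// everything they call, recursively
     }

   flatten_function walks the callee edges of such a node, inlines every
   call it can, recurses into already-inlined bodies, and gives up only on
   cycles, SSA-form mismatches and edges that cannot be inlined.  */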
1951 /* Count the number of callers of NODE and store it into DATA (which
1952 points to an int). Worker for cgraph_for_node_and_aliases. */
1954 static bool
1955 sum_callers (struct cgraph_node *node, void *data)
1957 struct cgraph_edge *e;
1958 int *num_calls = (int *)data;
1960 for (e = node->callers; e; e = e->next_caller)
1961 (*num_calls)++;
1962 return false;
1965 /* Inline NODE into all callers. Worker for cgraph_for_node_and_aliases.
1966 DATA points to the number of calls originally found, so we avoid
1967 infinite recursion. */
1969 static bool
1970 inline_to_all_callers (struct cgraph_node *node, void *data)
1972 int *num_calls = (int *)data;
1973 bool callee_removed = false;
1975 while (node->callers && !node->global.inlined_to)
1977 struct cgraph_node *caller = node->callers->caller;
1979 if (dump_file)
1981 fprintf (dump_file,
1982 "\nInlining %s size %i.\n",
1983 node->name (),
1984 inline_summary (node)->size);
1985 fprintf (dump_file,
1986 " Called once from %s %i insns.\n",
1987 node->callers->caller->name (),
1988 inline_summary (node->callers->caller)->size);
1991 inline_call (node->callers, true, NULL, NULL, true, &callee_removed);
1992 if (dump_file)
1993 fprintf (dump_file,
1994 " Inlined into %s which now has %i size\n",
1995 caller->name (),
1996 inline_summary (caller)->size);
1997 if (!(*num_calls)--)
1999 if (dump_file)
2000 fprintf (dump_file, "New calls found; giving up.\n");
2001 return callee_removed;
2003 if (callee_removed)
2004 return true;
2006 return false;
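/* For illustration (hypothetical user code, not part of this file): the
   typical win here is a static function with a single caller, where
   inlining eliminates the out-of-line copy entirely:

     static int helper (int x) { return x * x + 1; }
     int entry (int x) { return helper (x); }	// helper disappears

   inline_to_all_callers keeps inlining NODE into its remaining callers
   until no caller is left, or until it has processed as many calls as were
   originally counted; if new calls keep appearing it gives up, as the
   dump message above says, to avoid infinite recursion.  */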
2009 /* Output overall time estimate. */
2010 static void
2011 dump_overall_stats (void)
2013 int64_t sum_weighted = 0, sum = 0;
2014 struct cgraph_node *node;
2016 FOR_EACH_DEFINED_FUNCTION (node)
2017 if (!node->global.inlined_to
2018 && !node->alias)
2020 int time = inline_summary (node)->time;
2021 sum += time;
2022 sum_weighted += time * node->count;
2024 fprintf (dump_file, "Overall time estimate: "
2025 "%"PRId64" weighted by profile: "
2026 "%"PRId64"\n", sum, sum_weighted);
2029 /* Output some useful stats about inlining. */
2031 static void
2032 dump_inline_stats (void)
2034 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2035 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2036 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2037 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2038 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2039 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2040 int64_t reason[CIF_N_REASONS][3];
2041 int i;
2042 struct cgraph_node *node;
2044 memset (reason, 0, sizeof (reason));
2045 FOR_EACH_DEFINED_FUNCTION (node)
2047 struct cgraph_edge *e;
2048 for (e = node->callees; e; e = e->next_callee)
2050 if (e->inline_failed)
2052 reason[(int) e->inline_failed][0] += e->count;
2053 reason[(int) e->inline_failed][1] += e->frequency;
2054 reason[(int) e->inline_failed][2] ++;
2055 if (DECL_VIRTUAL_P (e->callee->decl))
2057 if (e->indirect_inlining_edge)
2058 noninlined_virt_indir_cnt += e->count;
2059 else
2060 noninlined_virt_cnt += e->count;
2062 else
2064 if (e->indirect_inlining_edge)
2065 noninlined_indir_cnt += e->count;
2066 else
2067 noninlined_cnt += e->count;
2070 else
2072 if (e->speculative)
2074 if (DECL_VIRTUAL_P (e->callee->decl))
2075 inlined_speculative_ply += e->count;
2076 else
2077 inlined_speculative += e->count;
2079 else if (DECL_VIRTUAL_P (e->callee->decl))
2081 if (e->indirect_inlining_edge)
2082 inlined_virt_indir_cnt += e->count;
2083 else
2084 inlined_virt_cnt += e->count;
2086 else
2088 if (e->indirect_inlining_edge)
2089 inlined_indir_cnt += e->count;
2090 else
2091 inlined_cnt += e->count;
2095 for (e = node->indirect_calls; e; e = e->next_callee)
2096 if (e->indirect_info->polymorphic)
2097 indirect_poly_cnt += e->count;
2098 else
2099 indirect_cnt += e->count;
2101 if (max_count)
2103 fprintf (dump_file,
2104 "Inlined %"PRId64 " + speculative "
2105 "%"PRId64 " + speculative polymorphic "
2106 "%"PRId64 " + previously indirect "
2107 "%"PRId64 " + virtual "
2108 "%"PRId64 " + virtual and previously indirect "
2109 "%"PRId64 "\n" "Not inlined "
2110 "%"PRId64 " + previously indirect "
2111 "%"PRId64 " + virtual "
2112 "%"PRId64 " + virtual and previously indirect "
2113 "%"PRId64 " + still indirect "
2114 "%"PRId64 " + still indirect polymorphic "
2115 "%"PRId64 "\n", inlined_cnt,
2116 inlined_speculative, inlined_speculative_ply,
2117 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2118 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2119 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2120 fprintf (dump_file,
2121 "Removed speculations %"PRId64 "\n",
2122 spec_rem);
2124 dump_overall_stats ();
2125 fprintf (dump_file, "\nWhy inlining failed?\n");
2126 for (i = 0; i < CIF_N_REASONS; i++)
2127 if (reason[i][2])
2128 fprintf (dump_file, "%-50s: %8i calls, %8i freq, %"PRId64" count\n",
2129 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2130 (int) reason[i][2], (int) reason[i][1], reason[i][0]);
2133 /* Decide on the inlining. We do so in topological order to avoid the
2134 expense of updating data structures. */
2136 static unsigned int
2137 ipa_inline (void)
2139 struct cgraph_node *node;
2140 int nnodes;
2141 struct cgraph_node **order;
2142 int i;
2143 int cold;
2144 bool remove_functions = false;
2146 if (!optimize)
2147 return 0;
2149 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2151 if (in_lto_p && optimize)
2152 ipa_update_after_lto_read ();
2154 if (dump_file)
2155 dump_inline_summaries (dump_file);
2157 nnodes = ipa_reverse_postorder (order);
2159 FOR_EACH_FUNCTION (node)
2160 node->aux = 0;
2162 if (dump_file)
2163 fprintf (dump_file, "\nFlattening functions:\n");
2165 /* In the first pass handle functions to be flattened. Do this first,
2166 so that none of our later choices can make flattening impossible. */
2167 for (i = nnodes - 1; i >= 0; i--)
2169 node = order[i];
2171 /* Handle nodes to be flattened.
2172 Ideally, when processing callees, we would stop inlining at the
2173 entry of cycles, possibly cloning that entry point and
2174 trying to flatten it, turning it into a self-recursive
2175 function. */
2176 if (lookup_attribute ("flatten",
2177 DECL_ATTRIBUTES (node->decl)) != NULL)
2179 if (dump_file)
2180 fprintf (dump_file,
2181 "Flattening %s\n", node->name ());
2182 flatten_function (node, false);
2185 if (dump_file)
2186 dump_overall_stats ();
2188 inline_small_functions ();
2190 /* Do the first after-inlining removal. We want to remove all "stale" extern
2191 inline functions and virtual functions, so we really know what is called once. */
2192 symtab->remove_unreachable_nodes (false, dump_file);
2193 free (order);
2195 /* Inline functions that have the property that, after inlining into all
2196 callers, code size will shrink because the out-of-line copy is eliminated.
2197 We do this regardless of the callee size as long as function growth limits
2198 are met. */
2199 if (dump_file)
2200 fprintf (dump_file,
2201 "\nDeciding on functions to be inlined into all callers and removing useless speculations:\n");
2203 /* Inlining one function called once has a good chance of preventing the
2204 inlining of another function into the same callee. Ideally we should
2205 work in priority order, but inlining hot functions first is probably a
2206 good cut without the extra pain of maintaining the queue.
2208 ??? This does not really fit the bill perfectly: inlining a function
2209 into another often leads to better optimization of the combined body
2210 due to the increased context for optimization.
2211 For example, if main() calls a function that outputs help
2212 and then a function that does the main optimization, we should inline
2213 the second with priority even if both calls are cold by themselves.
2215 We probably want to implement a new predicate replacing our use of
2216 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2217 to be hot. */
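/* Purely illustrative (hypothetical user code, not part of this file):
   the shape of program the comment above has in mind,

     int main (int argc, char **argv)
     {
       if (argc < 2)
	 {
	   print_help ();		// cold, small, called once
	   return 1;
	 }
       return do_main_work (argv[1]);	// cold call site, hot callee body
     }

   where inlining do_main_work into main first gives the optimizers the
   larger context, even though both call sites look equally cold.  */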
2218 for (cold = 0; cold <= 1; cold ++)
2220 FOR_EACH_DEFINED_FUNCTION (node)
2222 struct cgraph_edge *edge, *next;
2223 bool update=false;
2225 for (edge = node->callees; edge; edge = next)
2227 next = edge->next_callee;
2228 if (edge->speculative && !speculation_useful_p (edge, false))
2230 edge->resolve_speculation ();
2231 spec_rem += edge->count;
2232 update = true;
2233 remove_functions = true;
2236 if (update)
2238 struct cgraph_node *where = node->global.inlined_to
2239 ? node->global.inlined_to : node;
2240 reset_node_growth_cache (where);
2241 reset_edge_caches (where);
2242 inline_update_overall_summary (where);
2244 if (want_inline_function_to_all_callers_p (node, cold))
2246 int num_calls = 0;
2247 node->call_for_symbol_thunks_and_aliases (sum_callers, &num_calls,
2248 true);
2249 while (node->call_for_symbol_thunks_and_aliases (inline_to_all_callers,
2250 &num_calls, true))
2252 remove_functions = true;
2257 /* Free ipa-prop structures if they are no longer needed. */
2258 if (optimize)
2259 ipa_free_all_structures_after_iinln ();
2261 if (dump_file)
2263 fprintf (dump_file,
2264 "\nInlined %i calls, eliminated %i functions\n\n",
2265 ncalls_inlined, nfunctions_inlined);
2266 dump_inline_stats ();
2269 if (dump_file)
2270 dump_inline_summaries (dump_file);
2271 /* In WPA we use inline summaries for partitioning process. */
2272 if (!flag_wpa)
2273 inline_free_summary ();
2274 return remove_functions ? TODO_remove_functions : 0;
2277 /* Inline always-inline function calls in NODE. */
2279 static bool
2280 inline_always_inline_functions (struct cgraph_node *node)
2282 struct cgraph_edge *e;
2283 bool inlined = false;
2285 for (e = node->callees; e; e = e->next_callee)
2287 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2288 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2289 continue;
2291 if (e->recursive_p ())
2293 if (dump_file)
2294 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2295 e->callee->name ());
2296 e->inline_failed = CIF_RECURSIVE_INLINING;
2297 continue;
2300 if (!can_early_inline_edge_p (e))
2302 /* Set inlined to true if the callee is marked "always_inline" but
2303 is not inlinable. This will allow flagging an error later in
2304 expand_call_inline in tree-inline.c. */
2305 if (lookup_attribute ("always_inline",
2306 DECL_ATTRIBUTES (callee->decl)) != NULL)
2307 inlined = true;
2308 continue;
2311 if (dump_file)
2312 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2313 xstrdup (e->callee->name ()),
2314 xstrdup (e->caller->name ()));
2315 inline_call (e, true, NULL, NULL, false);
2316 inlined = true;
2318 if (inlined)
2319 inline_update_overall_summary (node);
2321 return inlined;
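/* For illustration (hypothetical user code, not part of this file): the
   calls handled here come from functions such as

     static inline int clamp (int v) __attribute__ ((always_inline));
     static inline int clamp (int v) { return v < 0 ? 0 : v; }

   whose callers must receive the body regardless of the -O level or the
   inline limits; when such a call cannot be early-inlined, INLINED is
   still set so that expand_call_inline can report the error later.  */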
2324 /* Decide on the early inlining of small functions. We do so in topological
2325 order to avoid the expense of updating data structures. */
2327 static bool
2328 early_inline_small_functions (struct cgraph_node *node)
2330 struct cgraph_edge *e;
2331 bool inlined = false;
2333 for (e = node->callees; e; e = e->next_callee)
2335 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2336 if (!inline_summary (callee)->inlinable
2337 || !e->inline_failed)
2338 continue;
2340 /* Do not consider functions not declared inline, unless -finline-small-functions or -finline-functions is in effect for the caller. */
2341 if (!DECL_DECLARED_INLINE_P (callee->decl)
2342 && !opt_for_fn (node->decl, flag_inline_small_functions)
2343 && !opt_for_fn (node->decl, flag_inline_functions))
2344 continue;
2346 if (dump_file)
2347 fprintf (dump_file, "Considering inline candidate %s.\n",
2348 callee->name ());
2350 if (!can_early_inline_edge_p (e))
2351 continue;
2353 if (e->recursive_p ())
2355 if (dump_file)
2356 fprintf (dump_file, " Not inlining: recursive call.\n");
2357 continue;
2360 if (!want_early_inline_function_p (e))
2361 continue;
2363 if (dump_file)
2364 fprintf (dump_file, " Inlining %s into %s.\n",
2365 xstrdup (callee->name ()),
2366 xstrdup (e->caller->name ()));
2367 inline_call (e, true, NULL, NULL, true);
2368 inlined = true;
2371 return inlined;
2374 unsigned int
2375 early_inliner (function *fun)
2377 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2378 struct cgraph_edge *edge;
2379 unsigned int todo = 0;
2380 int iterations = 0;
2381 bool inlined = false;
2383 if (seen_error ())
2384 return 0;
2386 /* Do nothing if the data structures for the IPA inliner are already
2387 computed. This happens when some pass decides to construct a new
2388 function and cgraph_add_new_function calls lowering passes and early
2389 optimization on it. This may confuse us when the early inliner decides
2390 to inline a call to a function clone, because function clones don't have
2391 a parameter list in ipa-prop matching their signature. */
2392 if (ipa_node_params_vector.exists ())
2393 return 0;
2395 #ifdef ENABLE_CHECKING
2396 node->verify ();
2397 #endif
2398 node->remove_all_references ();
2400 /* Even when not optimizing or not inlining, inline always-inline
2401 functions. */
2402 inlined = inline_always_inline_functions (node);
2404 if (!optimize
2405 || flag_no_inline
2406 || !flag_early_inlining
2407 /* Never inline regular functions into always-inline functions
2408 during incremental inlining. This is unfortunate, as functions
2409 calling always-inline functions will get less optimized, but at
2410 the same time inlining functions that call always-inline
2411 functions into an always-inline function might introduce
2412 cycles of edges to be always inlined in the callgraph.
2414 We might want to be smarter and simply avoid this type of inlining. */
2415 || DECL_DISREGARD_INLINE_LIMITS (node->decl))
2417 else if (lookup_attribute ("flatten",
2418 DECL_ATTRIBUTES (node->decl)) != NULL)
2420 /* When the function is marked to be flattened, recursively inline
2421 all calls in it. */
2422 if (dump_file)
2423 fprintf (dump_file,
2424 "Flattening %s\n", node->name ());
2425 flatten_function (node, true);
2426 inlined = true;
2428 else
2430 /* We iterate incremental inlining to get trivial cases of indirect
2431 inlining. */
2432 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2433 && early_inline_small_functions (node))
2435 timevar_push (TV_INTEGRATION);
2436 todo |= optimize_inline_calls (current_function_decl);
2438 /* Technically we ought to recompute inline parameters so the new
2439 iteration of the early inliner works as expected. However, the
2440 values are approximately right, and thus we only need to update edge
2441 info that might have been cleared out for newly discovered edges. */
2442 for (edge = node->callees; edge; edge = edge->next_callee)
2444 /* We have no summary for new bound store calls yet. */
2445 if (inline_edge_summary_vec.length () > (unsigned)edge->uid)
2447 struct inline_edge_summary *es = inline_edge_summary (edge);
2448 es->call_stmt_size
2449 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2450 es->call_stmt_time
2451 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2453 if (edge->callee->decl
2454 && !gimple_check_call_matching_types (
2455 edge->call_stmt, edge->callee->decl, false))
2456 edge->call_stmt_cannot_inline_p = true;
2458 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2459 inline_update_overall_summary (node);
2460 timevar_pop (TV_INTEGRATION);
2461 iterations++;
2462 inlined = false;
2464 if (dump_file)
2465 fprintf (dump_file, "Iterations: %i\n", iterations);
2468 if (inlined)
2470 timevar_push (TV_INTEGRATION);
2471 todo |= optimize_inline_calls (current_function_decl);
2472 timevar_pop (TV_INTEGRATION);
2475 fun->always_inline_functions_inlined = true;
2477 return todo;
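/* For illustration (hypothetical user code, not part of this file): the
   "trivial cases of indirect inlining" picked up by iterating above are
   of this shape,

     static int twice (int x) { return 2 * x; }
     static int apply (int (*fn) (int), int x) { return fn (x); }
     int user (int x) { return apply (twice, x); }

   where inlining apply into user turns the indirect call fn (x) into a
   direct call to twice, which the next iteration can then inline.  */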
2480 /* Do inlining of small functions. Doing so early helps profiling and other
2481 passes to be somewhat more effective and avoids some code duplication in
2482 the later real inlining pass for test cases with very many function calls. */
2484 namespace {
2486 const pass_data pass_data_early_inline =
2488 GIMPLE_PASS, /* type */
2489 "einline", /* name */
2490 OPTGROUP_INLINE, /* optinfo_flags */
2491 TV_EARLY_INLINING, /* tv_id */
2492 PROP_ssa, /* properties_required */
2493 0, /* properties_provided */
2494 0, /* properties_destroyed */
2495 0, /* todo_flags_start */
2496 0, /* todo_flags_finish */
2499 class pass_early_inline : public gimple_opt_pass
2501 public:
2502 pass_early_inline (gcc::context *ctxt)
2503 : gimple_opt_pass (pass_data_early_inline, ctxt)
2506 /* opt_pass methods: */
2507 virtual unsigned int execute (function *);
2509 }; // class pass_early_inline
2511 unsigned int
2512 pass_early_inline::execute (function *fun)
2514 return early_inliner (fun);
2517 } // anon namespace
2519 gimple_opt_pass *
2520 make_pass_early_inline (gcc::context *ctxt)
2522 return new pass_early_inline (ctxt);
2525 namespace {
2527 const pass_data pass_data_ipa_inline =
2529 IPA_PASS, /* type */
2530 "inline", /* name */
2531 OPTGROUP_INLINE, /* optinfo_flags */
2532 TV_IPA_INLINING, /* tv_id */
2533 0, /* properties_required */
2534 0, /* properties_provided */
2535 0, /* properties_destroyed */
2536 0, /* todo_flags_start */
2537 ( TODO_dump_symtab ), /* todo_flags_finish */
2540 class pass_ipa_inline : public ipa_opt_pass_d
2542 public:
2543 pass_ipa_inline (gcc::context *ctxt)
2544 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2545 inline_generate_summary, /* generate_summary */
2546 inline_write_summary, /* write_summary */
2547 inline_read_summary, /* read_summary */
2548 NULL, /* write_optimization_summary */
2549 NULL, /* read_optimization_summary */
2550 NULL, /* stmt_fixup */
2551 0, /* function_transform_todo_flags_start */
2552 inline_transform, /* function_transform */
2553 NULL) /* variable_transform */
2556 /* opt_pass methods: */
2557 virtual unsigned int execute (function *) { return ipa_inline (); }
2559 }; // class pass_ipa_inline
2561 } // anon namespace
2563 ipa_opt_pass_d *
2564 make_pass_ipa_inline (gcc::context *ctxt)
2566 return new pass_ipa_inline (ctxt);