gcc/ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2017 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Inlining decision heuristics
 23    The implementation of the inliner is organized as follows:
 25    inlining heuristics limits
 27      can_inline_edge_p allows checking that a particular inlining is allowed
 28      by the limits specified by the user (allowed function growth and so
 29      on).
 31      Functions are inlined when it is obvious the result is profitable (such
 32      as functions called once or when inlining reduces code size).
33 In addition to that we perform inlining of small functions and recursive
34 inlining.
36 inlining heuristics
38 The inliner itself is split into two passes:
40 pass_early_inlining
 42      A simple local inlining pass that inlines callees into the current function.
 43      This pass makes no use of whole unit analysis and thus it can make only
 44      very simple decisions based on local properties.
 46      The strength of the pass is that it is run in topological order
 47      (reverse postorder) on the callgraph. Functions are converted into SSA
 48      form just before this pass and optimized subsequently. As a result, the
 49      callees of the function seen by the early inliner were already optimized
 50      and the results of early inlining add a lot of optimization opportunities
 51      for local optimization.
 53      The pass handles the obvious inlining decisions within the compilation
 54      unit - inlining auto inline functions, inlining for size and
 55      flattening.
 57      The main strength of the pass is its ability to eliminate the abstraction
 58      penalty in C++ code (via a combination of inlining and early
 59      optimization) and thus improve the quality of analysis done by the real IPA
 60      optimizers.
 62      Because of its lack of whole unit knowledge, the pass cannot really make
 63      good code size/performance tradeoffs. It does, however, do very simple
 64      speculative inlining, allowing code size to grow by
 65      EARLY_INLINING_INSNS when the callee is a leaf function. In this case the
 66      optimizations performed later are very likely to eliminate the cost.
68 pass_ipa_inline
 70      This is the real inliner, able to handle inlining with whole program
 71      knowledge. It performs the following steps:
 73      1) inlining of small functions. This is implemented by a greedy
 74      algorithm ordering all inlinable cgraph edges by their badness and
 75      inlining them in this order as long as the inline limits allow doing so.
 77      This heuristic is not very good at inlining recursive calls. Recursive
 78      calls can be inlined with results similar to loop unrolling. To do so,
 79      a special purpose recursive inliner is executed on the function when a
 80      recursive edge is met as a viable candidate.
 82      2) Unreachable functions are removed from the callgraph. Inlining leads
 83      to devirtualization and other modifications of the callgraph, so functions
 84      may become unreachable during the process. Also functions declared as
 85      extern inline or virtual functions are removed, since after inlining
 86      we no longer need the offline bodies.
 88      3) Functions called once and not exported from the unit are inlined.
 89      This should almost always lead to a reduction of code size by eliminating
 90      the need for an offline copy of the function. */
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "backend.h"
96 #include "target.h"
97 #include "rtl.h"
98 #include "tree.h"
99 #include "gimple.h"
100 #include "alloc-pool.h"
101 #include "tree-pass.h"
102 #include "gimple-ssa.h"
103 #include "cgraph.h"
104 #include "lto-streamer.h"
105 #include "trans-mem.h"
106 #include "calls.h"
107 #include "tree-inline.h"
108 #include "params.h"
109 #include "profile.h"
110 #include "symbol-summary.h"
111 #include "tree-vrp.h"
112 #include "ipa-prop.h"
113 #include "ipa-fnsummary.h"
114 #include "ipa-inline.h"
115 #include "ipa-utils.h"
116 #include "sreal.h"
117 #include "auto-profile.h"
118 #include "builtins.h"
119 #include "fibonacci_heap.h"
121 typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
122 typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
124 /* Statistics we collect about inlining algorithm. */
125 static int overall_size;
126 static gcov_type max_count;
127 static gcov_type spec_rem;
129 /* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100. */
130 static sreal cgraph_freq_base_rec, percent_rec;
132 /* Return false when inlining edge E would lead to violating
133 limits on function unit growth or stack usage growth.
 135    The relative function body growth limit is present generally
 136    to avoid problems with non-linear behavior of the compiler.
 137    To allow inlining huge functions into a tiny wrapper, the limit
 138    is always based on the bigger of the two functions considered.
 140    For stack growth limits we always base the growth on the stack usage
 141    of the caller. We want to prevent applications from segfaulting
 142    on stack overflow when functions with huge stack frames get
 143    inlined. */
145 static bool
146 caller_growth_limits (struct cgraph_edge *e)
148 struct cgraph_node *to = e->caller;
149 struct cgraph_node *what = e->callee->ultimate_alias_target ();
150 int newsize;
151 int limit = 0;
152 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
153 inline_summary *info, *what_info, *outer_info = inline_summaries->get (to);
 155   /* Look for the function e->caller is inlined to. While doing
 156      so, work out the largest function body on the way. As
 157      described above, we want to base our function growth
 158      limits on that; not on the self size of the
 159      outer function, nor on the self size of the inline code
 160      we immediately inline into. This is the most relaxed
 161      interpretation of the rule "do not grow large functions
 162      too much in order to prevent the compiler from exploding". */
163 while (true)
165 info = inline_summaries->get (to);
166 if (limit < info->self_size)
167 limit = info->self_size;
168 if (stack_size_limit < info->estimated_self_stack_size)
169 stack_size_limit = info->estimated_self_stack_size;
170 if (to->global.inlined_to)
171 to = to->callers->caller;
172 else
173 break;
176 what_info = inline_summaries->get (what);
178 if (limit < what_info->self_size)
179 limit = what_info->self_size;
181 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
183 /* Check the size after inlining against the function limits. But allow
184 the function to shrink if it went over the limits by forced inlining. */
185 newsize = estimate_size_after_inlining (to, e);
186 if (newsize >= info->size
187 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
188 && newsize > limit)
190 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
191 return false;
194 if (!what_info->estimated_stack_size)
195 return true;
197 /* FIXME: Stack size limit often prevents inlining in Fortran programs
 198     due to large I/O data structures used by the Fortran front end.
199 We ought to ignore this limit when we know that the edge is executed
200 on every invocation of the caller (i.e. its call statement dominates
201 exit block). We do not track this information, yet. */
202 stack_size_limit += ((gcov_type)stack_size_limit
203 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
205 inlined_stack = (outer_info->stack_frame_offset
206 + outer_info->estimated_self_stack_size
207 + what_info->estimated_stack_size);
208 /* Check new stack consumption with stack consumption at the place
209 stack is used. */
210 if (inlined_stack > stack_size_limit
211 /* If function already has large stack usage from sibling
212 inline call, we can inline, too.
 213         This bit overoptimistically assumes that we are good at stack
214 packing. */
215 && inlined_stack > info->estimated_stack_size
216 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
218 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
219 return false;
221 return true;
224 /* Dump info about why inlining has failed. */
226 static void
227 report_inline_failed_reason (struct cgraph_edge *e)
229 if (dump_file)
231 fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
232 xstrdup_for_dump (e->caller->name ()), e->caller->order,
233 xstrdup_for_dump (e->callee->name ()), e->callee->order,
234 cgraph_inline_failed_string (e->inline_failed));
235 if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
236 || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
237 && e->caller->lto_file_data
238 && e->callee->ultimate_alias_target ()->lto_file_data)
240 fprintf (dump_file, " LTO objects: %s, %s\n",
241 e->caller->lto_file_data->file_name,
242 e->callee->ultimate_alias_target ()->lto_file_data->file_name);
244 if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
245 cl_target_option_print_diff
246 (dump_file, 2, target_opts_for_fn (e->caller->decl),
247 target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
248 if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
249 cl_optimization_print_diff
250 (dump_file, 2, opts_for_fn (e->caller->decl),
251 opts_for_fn (e->callee->ultimate_alias_target ()->decl));
255 /* Decide whether sanitizer-related attributes allow inlining. */
257 static bool
258 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
 260   /* Don't care if the sanitizer is disabled. */
261 if (!(flag_sanitize & SANITIZE_ADDRESS))
262 return true;
264 if (!caller || !callee)
265 return true;
267 return !!lookup_attribute ("no_sanitize_address",
268 DECL_ATTRIBUTES (caller)) ==
269 !!lookup_attribute ("no_sanitize_address",
270 DECL_ATTRIBUTES (callee));
 273 /* Used for flags where it is safe to inline when the caller's value is
 274    greater than the callee's. */
275 #define check_maybe_up(flag) \
276 (opts_for_fn (caller->decl)->x_##flag \
277 != opts_for_fn (callee->decl)->x_##flag \
278 && (!always_inline \
279 || opts_for_fn (caller->decl)->x_##flag \
280 < opts_for_fn (callee->decl)->x_##flag))
281 /* Used for flags where it is safe to inline when caller's value is
282 smaller than callee's. */
283 #define check_maybe_down(flag) \
284 (opts_for_fn (caller->decl)->x_##flag \
285 != opts_for_fn (callee->decl)->x_##flag \
286 && (!always_inline \
287 || opts_for_fn (caller->decl)->x_##flag \
288 > opts_for_fn (callee->decl)->x_##flag))
289 /* Used for flags where exact match is needed for correctness. */
290 #define check_match(flag) \
291 (opts_for_fn (caller->decl)->x_##flag \
292 != opts_for_fn (callee->decl)->x_##flag)
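/* As a reading aid derived from the macro bodies above:
   check_maybe_up (flag_rounding_math) evaluates to true (a blocking
   mismatch) when the caller's and the callee's values differ and, for an
   always_inline callee, only when the caller's value is the smaller of the
   two; check_match flags any difference at all.  */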
294 /* Decide if we can inline the edge and possibly update
295 inline_failed reason.
296 We check whether inlining is possible at all and whether
297 caller growth limits allow doing so.
 299    If REPORT is true, output the reason to the dump file.
 301    If DISREGARD_LIMITS is true, ignore size limits. */
303 static bool
304 can_inline_edge_p (struct cgraph_edge *e, bool report,
305 bool disregard_limits = false, bool early = false)
307 gcc_checking_assert (e->inline_failed);
309 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
311 if (report)
312 report_inline_failed_reason (e);
313 return false;
316 bool inlinable = true;
317 enum availability avail;
318 cgraph_node *caller = e->caller->global.inlined_to
319 ? e->caller->global.inlined_to : e->caller;
320 cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
321 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
322 tree callee_tree
323 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
325 if (!callee->definition)
327 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
328 inlinable = false;
330 else if (callee->calls_comdat_local)
332 e->inline_failed = CIF_USES_COMDAT_LOCAL;
333 inlinable = false;
335 else if (avail <= AVAIL_INTERPOSABLE)
337 e->inline_failed = CIF_OVERWRITABLE;
338 inlinable = false;
340 /* All edges with call_stmt_cannot_inline_p should have inline_failed
341 initialized to one of FINAL_ERROR reasons. */
342 else if (e->call_stmt_cannot_inline_p)
343 gcc_unreachable ();
344 /* Don't inline if the functions have different EH personalities. */
345 else if (DECL_FUNCTION_PERSONALITY (caller->decl)
346 && DECL_FUNCTION_PERSONALITY (callee->decl)
347 && (DECL_FUNCTION_PERSONALITY (caller->decl)
348 != DECL_FUNCTION_PERSONALITY (callee->decl)))
350 e->inline_failed = CIF_EH_PERSONALITY;
351 inlinable = false;
353 /* TM pure functions should not be inlined into non-TM_pure
354 functions. */
355 else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
357 e->inline_failed = CIF_UNSPECIFIED;
358 inlinable = false;
360 /* Check compatibility of target optimization options. */
361 else if (!targetm.target_option.can_inline_p (caller->decl,
362 callee->decl))
364 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
365 inlinable = false;
367 else if (!inline_summaries->get (callee)->inlinable)
369 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
370 inlinable = false;
372 /* Don't inline a function with mismatched sanitization attributes. */
373 else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
375 e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
376 inlinable = false;
378 /* Check if caller growth allows the inlining. */
379 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
380 && !disregard_limits
381 && !lookup_attribute ("flatten",
382 DECL_ATTRIBUTES (caller->decl))
383 && !caller_growth_limits (e))
384 inlinable = false;
385 /* Don't inline a function with a higher optimization level than the
 386      caller. FIXME: this is really just the tip of the iceberg of handling
 387      the optimization attribute. */
388 else if (caller_tree != callee_tree)
390 bool always_inline =
391 (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
392 && lookup_attribute ("always_inline",
393 DECL_ATTRIBUTES (callee->decl)));
394 inline_summary *caller_info = inline_summaries->get (caller);
395 inline_summary *callee_info = inline_summaries->get (callee);
 397       /* Until GCC 4.9 we did not check the semantics-altering flags
 398          below and inlined across the optimization boundary.
 399          Enabling the checks below breaks several packages by refusing
 400          to inline library always_inline functions. See PR65873.
 401          Disable the check for early inlining for now until a better solution
 402          is found. */
403 if (always_inline && early)
 405       /* There are some options that change IL semantics, which means
 406          we cannot inline in these cases for correctness reasons.
 407          Not even for always_inline declared functions. */
408 else if (check_match (flag_wrapv)
409 || check_match (flag_trapv)
 410                /* When the caller or callee does FP math, be sure the FP codegen flags
 411                   are compatible. */
412 || ((caller_info->fp_expressions && callee_info->fp_expressions)
413 && (check_maybe_up (flag_rounding_math)
414 || check_maybe_up (flag_trapping_math)
415 || check_maybe_down (flag_unsafe_math_optimizations)
416 || check_maybe_down (flag_finite_math_only)
417 || check_maybe_up (flag_signaling_nans)
418 || check_maybe_down (flag_cx_limited_range)
419 || check_maybe_up (flag_signed_zeros)
420 || check_maybe_down (flag_associative_math)
421 || check_maybe_down (flag_reciprocal_math)
422 || check_maybe_down (flag_fp_int_builtin_inexact)
423 /* Strictly speaking only when the callee contains function
424 calls that may end up setting errno. */
425 || check_maybe_up (flag_errno_math)))
 426                /* We do not want code compiled with exceptions to be
427 brought into a non-EH function unless we know that the callee
428 does not throw.
429 This is tracked by DECL_FUNCTION_PERSONALITY. */
430 || (check_maybe_up (flag_non_call_exceptions)
431 && DECL_FUNCTION_PERSONALITY (callee->decl))
432 || (check_maybe_up (flag_exceptions)
433 && DECL_FUNCTION_PERSONALITY (callee->decl))
 434                /* When devirtualization is disabled for the callee, it is not safe
 435                   to inline it as we have possibly mangled the type info.
 436                   Allow early inlining of always inlines. */
437 || (!early && check_maybe_down (flag_devirtualize)))
439 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
440 inlinable = false;
442 /* gcc.dg/pr43564.c. Apply user-forced inline even at -O0. */
443 else if (always_inline)
445 /* When user added an attribute to the callee honor it. */
446 else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
447 && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
449 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
450 inlinable = false;
 452       /* If explicit optimize attributes are not used, the mismatch is caused
 453          by different command line options used to build different units.
 454          Do not care about COMDAT functions - those are intended to be
 455          optimized with the optimization flags of the module they are used in.
 456          Also do not care about mixing up size/speed optimization when
 457          DECL_DISREGARD_INLINE_LIMITS is set. */
458 else if ((callee->merged_comdat
459 && !lookup_attribute ("optimize",
460 DECL_ATTRIBUTES (caller->decl)))
461 || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
 463       /* If the mismatch is caused by merging two LTO units with different
 464          optimization flags we want to be a bit nicer. However never inline
 465          if one of the functions is not optimized at all. */
466 else if (!opt_for_fn (callee->decl, optimize)
467 || !opt_for_fn (caller->decl, optimize))
469 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
470 inlinable = false;
 472       /* If the callee is optimized for size and the caller is not, allow inlining if
 473          code shrinks or we are within the MAX_INLINE_INSNS_SINGLE limit and the callee
 474          is inline (and thus likely a unified comdat). This will allow the caller
 475          to run faster. */
476 else if (opt_for_fn (callee->decl, optimize_size)
477 > opt_for_fn (caller->decl, optimize_size))
479 int growth = estimate_edge_growth (e);
480 if (growth > 0
481 && (!DECL_DECLARED_INLINE_P (callee->decl)
482 && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
483 MAX_INLINE_INSNS_AUTO)))
485 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
486 inlinable = false;
489 /* If callee is more aggressively optimized for performance than caller,
490 we generally want to inline only cheap (runtime wise) functions. */
491 else if (opt_for_fn (callee->decl, optimize_size)
492 < opt_for_fn (caller->decl, optimize_size)
493 || (opt_for_fn (callee->decl, optimize)
494 > opt_for_fn (caller->decl, optimize)))
496 if (estimate_edge_time (e)
497 >= 20 + ipa_call_summaries->get (e)->call_stmt_time)
499 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
500 inlinable = false;
506 if (!inlinable && report)
507 report_inline_failed_reason (e);
508 return inlinable;
512 /* Return true if the edge E is inlinable during early inlining. */
514 static bool
515 can_early_inline_edge_p (struct cgraph_edge *e)
517 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
 518   /* The early inliner might get called at WPA stage when an IPA pass adds a new
 519      function. In this case we cannot really do any early inlining
 520      because function bodies are missing. */
521 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
522 return false;
523 if (!gimple_has_body_p (callee->decl))
525 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
526 return false;
 528   /* In the early inliner some of the callees may not be in SSA form yet
 529      (i.e. the callgraph is cyclic and we did not process
 530      the callee with the early inliner yet). We don't have a CIF code for this
 531      case; later we will re-do the decision in the real inliner. */
532 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
533 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
535 if (dump_file)
536 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
537 return false;
539 if (!can_inline_edge_p (e, true, false, true))
540 return false;
541 return true;
545 /* Return number of calls in N. Ignore cheap builtins. */
547 static int
548 num_calls (struct cgraph_node *n)
550 struct cgraph_edge *e;
551 int num = 0;
553 for (e = n->callees; e; e = e->next_callee)
554 if (!is_inexpensive_builtin (e->callee->decl))
555 num++;
556 return num;
560 /* Return true if we are interested in inlining small function. */
562 static bool
563 want_early_inline_function_p (struct cgraph_edge *e)
565 bool want_inline = true;
566 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
568 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
570 /* For AutoFDO, we need to make sure that before profile summary, all
571 hot paths' IR look exactly the same as profiled binary. As a result,
572 in einliner, we will disregard size limit and inline those callsites
573 that are:
574 * inlined in the profiled binary, and
575 * the cloned callee has enough samples to be considered "hot". */
576 else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
578 else if (!DECL_DECLARED_INLINE_P (callee->decl)
579 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
581 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
582 report_inline_failed_reason (e);
583 want_inline = false;
585 else
587 int growth = estimate_edge_growth (e);
588 int n;
590 if (growth <= 0)
592 else if (!e->maybe_hot_p ()
593 && growth > 0)
595 if (dump_file)
596 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
597 "call is cold and code would grow by %i\n",
598 xstrdup_for_dump (e->caller->name ()),
599 e->caller->order,
600 xstrdup_for_dump (callee->name ()), callee->order,
601 growth);
602 want_inline = false;
604 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
606 if (dump_file)
607 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
608 "growth %i exceeds --param early-inlining-insns\n",
609 xstrdup_for_dump (e->caller->name ()),
610 e->caller->order,
611 xstrdup_for_dump (callee->name ()), callee->order,
612 growth);
613 want_inline = false;
615 else if ((n = num_calls (callee)) != 0
616 && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
618 if (dump_file)
619 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
620 "growth %i exceeds --param early-inlining-insns "
621 "divided by number of calls\n",
622 xstrdup_for_dump (e->caller->name ()),
623 e->caller->order,
624 xstrdup_for_dump (callee->name ()), callee->order,
625 growth);
626 want_inline = false;
629 return want_inline;
632 /* Compute time of the edge->caller + edge->callee execution when inlining
633 does not happen. */
635 inline sreal
636 compute_uninlined_call_time (struct cgraph_edge *edge,
637 sreal uninlined_call_time)
639 cgraph_node *caller = (edge->caller->global.inlined_to
640 ? edge->caller->global.inlined_to
641 : edge->caller);
643 if (edge->count && caller->count)
644 uninlined_call_time *= (sreal)edge->count / caller->count;
645 if (edge->frequency)
646 uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
647 else
648 uninlined_call_time = uninlined_call_time >> 11;
650 sreal caller_time = inline_summaries->get (caller)->time;
651 return uninlined_call_time + caller_time;
 654 /* Same as compute_uninlined_call_time but compute the time when inlining
655 does happen. */
657 inline sreal
658 compute_inlined_call_time (struct cgraph_edge *edge,
659 sreal time)
661 cgraph_node *caller = (edge->caller->global.inlined_to
662 ? edge->caller->global.inlined_to
663 : edge->caller);
664 sreal caller_time = inline_summaries->get (caller)->time;
666 if (edge->count && caller->count)
667 time *= (sreal)edge->count / caller->count;
668 if (edge->frequency)
669 time *= cgraph_freq_base_rec * edge->frequency;
670 else
671 time = time >> 11;
673 /* This calculation should match one in ipa-inline-analysis.c
674 (estimate_edge_size_and_time). */
675 time -= (sreal) edge->frequency
676 * ipa_call_summaries->get (edge)->call_stmt_time / CGRAPH_FREQ_BASE;
677 time += caller_time;
678 if (time <= 0)
679 time = ((sreal) 1) >> 8;
680 gcc_checking_assert (time >= 0);
681 return time;
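/* Note: both helpers above scale the callee time by
   edge->frequency / CGRAPH_FREQ_BASE (cgraph_freq_base_rec is the
   precomputed reciprocal), so an edge executed on half of the caller's
   invocations contributes half of the callee's time; edges with zero
   frequency are damped by 2^11 instead.  */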
684 /* Return true if the speedup for inlining E is bigger than
 685    PARAM_INLINE_MIN_SPEEDUP percent. */
687 static bool
688 big_speedup_p (struct cgraph_edge *e)
690 sreal unspec_time;
691 sreal spec_time = estimate_edge_time (e, &unspec_time);
692 sreal time = compute_uninlined_call_time (e, unspec_time);
693 sreal inlined_time = compute_inlined_call_time (e, spec_time);
695 if (time - inlined_time
696 > (sreal) (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP))
697 * percent_rec)
698 return true;
699 return false;
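/* Since percent_rec is 1/100, the test above requires inlining to save at
   least PARAM_INLINE_MIN_SPEEDUP percent of the estimated uninlined
   caller+callee time.  */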
702 /* Return true if we are interested in inlining small function.
703 When REPORT is true, report reason to dump file. */
705 static bool
706 want_inline_small_function_p (struct cgraph_edge *e, bool report)
708 bool want_inline = true;
709 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
711 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
713 else if (!DECL_DECLARED_INLINE_P (callee->decl)
714 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
716 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
717 want_inline = false;
 719   /* Do a fast and conservative check if the function can be a good
 720      inline candidate. At the moment we allow inline hints to
 721      promote non-inline functions to inline and we increase
 722      MAX_INLINE_INSNS_SINGLE 16-fold for inline functions. */
723 else if ((!DECL_DECLARED_INLINE_P (callee->decl)
724 && (!e->count || !e->maybe_hot_p ()))
725 && inline_summaries->get (callee)->min_size
726 - ipa_call_summaries->get (e)->call_stmt_size
727 > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
729 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
730 want_inline = false;
732 else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
733 && inline_summaries->get (callee)->min_size
734 - ipa_call_summaries->get (e)->call_stmt_size
735 > 16 * MAX_INLINE_INSNS_SINGLE)
737 e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
738 ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
739 : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
740 want_inline = false;
742 else
744 int growth = estimate_edge_growth (e);
745 inline_hints hints = estimate_edge_hints (e);
746 bool big_speedup = big_speedup_p (e);
748 if (growth <= 0)
 750       /* Apply the MAX_INLINE_INSNS_SINGLE limit. Do not do so when
 751          hints suggest that inlining the given function is very profitable. */
752 else if (DECL_DECLARED_INLINE_P (callee->decl)
753 && growth >= MAX_INLINE_INSNS_SINGLE
754 && ((!big_speedup
755 && !(hints & (INLINE_HINT_indirect_call
756 | INLINE_HINT_known_hot
757 | INLINE_HINT_loop_iterations
758 | INLINE_HINT_array_index
759 | INLINE_HINT_loop_stride)))
760 || growth >= MAX_INLINE_INSNS_SINGLE * 16))
762 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
763 want_inline = false;
765 else if (!DECL_DECLARED_INLINE_P (callee->decl)
766 && !opt_for_fn (e->caller->decl, flag_inline_functions))
768 /* growth_likely_positive is expensive, always test it last. */
769 if (growth >= MAX_INLINE_INSNS_SINGLE
770 || growth_likely_positive (callee, growth))
772 e->inline_failed = CIF_NOT_DECLARED_INLINED;
773 want_inline = false;
 776       /* Apply the MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
 777          Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
 778          inlining the given function is very profitable. */
779 else if (!DECL_DECLARED_INLINE_P (callee->decl)
780 && !big_speedup
781 && !(hints & INLINE_HINT_known_hot)
782 && growth >= ((hints & (INLINE_HINT_indirect_call
783 | INLINE_HINT_loop_iterations
784 | INLINE_HINT_array_index
785 | INLINE_HINT_loop_stride))
786 ? MAX (MAX_INLINE_INSNS_AUTO,
787 MAX_INLINE_INSNS_SINGLE)
788 : MAX_INLINE_INSNS_AUTO))
790 /* growth_likely_positive is expensive, always test it last. */
791 if (growth >= MAX_INLINE_INSNS_SINGLE
792 || growth_likely_positive (callee, growth))
794 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
795 want_inline = false;
798 /* If call is cold, do not inline when function body would grow. */
799 else if (!e->maybe_hot_p ()
800 && (growth >= MAX_INLINE_INSNS_SINGLE
801 || growth_likely_positive (callee, growth)))
803 e->inline_failed = CIF_UNLIKELY_CALL;
804 want_inline = false;
807 if (!want_inline && report)
808 report_inline_failed_reason (e);
809 return want_inline;
 812 /* EDGE is a self recursive edge.
 813    We handle two cases - when function A is inlined into itself
 814    or when function A is being inlined into another inlined copy of function
 815    A within function B.
 817    In the first case OUTER_NODE points to the toplevel copy of A, while
 818    in the second case OUTER_NODE points to the outermost copy of A in B.
 820    In both cases we want to be extra selective since
 821    inlining the call will just cause new recursive calls to appear. */
823 static bool
824 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
825 struct cgraph_node *outer_node,
826 bool peeling,
827 int depth)
829 char const *reason = NULL;
830 bool want_inline = true;
831 int caller_freq = CGRAPH_FREQ_BASE;
832 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
834 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
835 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
837 if (!edge->maybe_hot_p ())
839 reason = "recursive call is cold";
840 want_inline = false;
842 else if (max_count && !outer_node->count)
844 reason = "not executed in profile";
845 want_inline = false;
847 else if (depth > max_depth)
849 reason = "--param max-inline-recursive-depth exceeded.";
850 want_inline = false;
853 if (outer_node->global.inlined_to)
854 caller_freq = outer_node->callers->frequency;
856 if (!caller_freq)
858 reason = "function is inlined and unlikely";
859 want_inline = false;
862 if (!want_inline)
 864   /* Inlining of a self recursive function into a copy of itself within another function
 865      is a transformation similar to loop peeling.
 867      Peeling is profitable if we can inline enough copies to make the probability
 868      of an actual call to the self recursive function very small. Be sure that
 869      the probability of recursion is small.
 871      We ensure that the frequency of recursing is at most 1 - (1/max_depth).
 872      This way the expected number of recursions is at most max_depth. */
873 else if (peeling)
875 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
876 / max_depth);
877 int i;
878 for (i = 1; i < depth; i++)
879 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
880 if (max_count
881 && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
882 >= max_prob))
884 reason = "profile of recursive call is too large";
885 want_inline = false;
887 if (!max_count
888 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
889 >= max_prob))
891 reason = "frequency of recursive call is too large";
892 want_inline = false;
 895   /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the recursion
 896      depth is large. We reduce function call overhead and increase the chances that
 897      things fit in the hardware return predictor.
 899      Recursive inlining might however increase the cost of stack frame setup,
 900      actually slowing down functions whose recursion tree is wide rather than
 901      deep.
 903      Deciding reliably on when to do recursive inlining without profile feedback
 904      is tricky. For now we disable recursive inlining when the probability of self
 905      recursion is low.
 907      Recursive inlining of a self recursive call within a loop also results in large loop
 908      depths that generally optimize badly. We may want to throttle down inlining
 909      in those cases. In particular this seems to happen in one of the libstdc++ rb tree
 910      methods. */
911 else
913 if (max_count
914 && (edge->count * 100 / outer_node->count
915 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
917 reason = "profile of recursive call is too small";
918 want_inline = false;
920 else if (!max_count
921 && (edge->frequency * 100 / caller_freq
922 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
924 reason = "frequency of recursive call is too small";
925 want_inline = false;
928 if (!want_inline && dump_file)
929 fprintf (dump_file, " not inlining recursively: %s\n", reason);
930 return want_inline;
 933 /* Return true when NODE has an uninlinable caller;
 934    set HAS_HOT_CALL if it has a hot call.
 935    Worker for cgraph_for_node_and_aliases. */
937 static bool
938 check_callers (struct cgraph_node *node, void *has_hot_call)
940 struct cgraph_edge *e;
941 for (e = node->callers; e; e = e->next_caller)
943 if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
944 return true;
945 if (!can_inline_edge_p (e, true))
946 return true;
947 if (e->recursive_p ())
948 return true;
949 if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
950 *(bool *)has_hot_call = true;
952 return false;
955 /* If NODE has a caller, return true. */
957 static bool
958 has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
960 if (node->callers)
961 return true;
962 return false;
965 /* Decide if inlining NODE would reduce unit size by eliminating
966 the offline copy of function.
967 When COLD is true the cold calls are considered, too. */
969 static bool
970 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
972 bool has_hot_call = false;
 974   /* Aliases get inlined along with the function they alias. */
975 if (node->alias)
976 return false;
977 /* Already inlined? */
978 if (node->global.inlined_to)
979 return false;
980 /* Does it have callers? */
981 if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
982 return false;
983 /* Inlining into all callers would increase size? */
984 if (estimate_growth (node) > 0)
985 return false;
986 /* All inlines must be possible. */
987 if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
988 true))
989 return false;
990 if (!cold && !has_hot_call)
991 return false;
992 return true;
 995 /* A cost model driving the inlining heuristics in a way so that the edges with
 996    the smallest badness are inlined first. After each inlining is performed
 997    the costs of all caller edges of the nodes affected are recomputed so the
 998    metrics may accurately depend on values such as the number of inlinable callers
 999    of the function or the function body size. */
1001 static sreal
1002 edge_badness (struct cgraph_edge *edge, bool dump)
1004 sreal badness;
1005 int growth;
1006 sreal edge_time, unspec_edge_time;
1007 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1008 struct inline_summary *callee_info = inline_summaries->get (callee);
1009 inline_hints hints;
1010 cgraph_node *caller = (edge->caller->global.inlined_to
1011 ? edge->caller->global.inlined_to
1012 : edge->caller);
1014 growth = estimate_edge_growth (edge);
1015 edge_time = estimate_edge_time (edge, &unspec_edge_time);
1016 hints = estimate_edge_hints (edge);
1017 gcc_checking_assert (edge_time >= 0);
1018 /* Check that inlined time is better, but tolerate some roundoff issues. */
1019 gcc_checking_assert ((edge_time - callee_info->time).to_int () <= 0);
1020 gcc_checking_assert (growth <= callee_info->size);
1022 if (dump)
1024 fprintf (dump_file, " Badness calculation for %s/%i -> %s/%i\n",
1025 xstrdup_for_dump (edge->caller->name ()),
1026 edge->caller->order,
1027 xstrdup_for_dump (callee->name ()),
1028 edge->callee->order);
1029 fprintf (dump_file, " size growth %i, time %f unspec %f ",
1030 growth,
1031 edge_time.to_double (),
1032 unspec_edge_time.to_double ());
1033 dump_inline_hints (dump_file, hints);
1034 if (big_speedup_p (edge))
1035 fprintf (dump_file, " big_speedup");
1036 fprintf (dump_file, "\n");
1039 /* Always prefer inlining saving code size. */
1040 if (growth <= 0)
1042 badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
1043 if (dump)
1044 fprintf (dump_file, " %f: Growth %d <= 0\n", badness.to_double (),
1045 growth);
1047 /* Inlining into EXTERNAL functions is not going to change anything unless
1048 they are themselves inlined. */
1049 else if (DECL_EXTERNAL (caller->decl))
1051 if (dump)
1052 fprintf (dump_file, " max: function is external\n");
1053 return sreal::max ();
1055 /* When profile is available. Compute badness as:
1057 time_saved * caller_count
1058 goodness = -------------------------------------------------
1059 growth_of_caller * overall_growth * combined_size
1061 badness = - goodness
 1063      Again use a negative value to make calls with a profile appear hotter
 1064      than calls without. */
1066 else if (opt_for_fn (caller->decl, flag_guess_branch_prob) || caller->count)
1068 sreal numerator, denominator;
1069 int overall_growth;
1070 sreal inlined_time = compute_inlined_call_time (edge, edge_time);
1072 numerator = (compute_uninlined_call_time (edge, unspec_edge_time)
1073 - inlined_time);
1074 if (numerator == 0)
1075 numerator = ((sreal) 1 >> 8);
1076 if (caller->count)
1077 numerator *= caller->count;
1078 else if (opt_for_fn (caller->decl, flag_branch_probabilities))
1079 numerator = numerator >> 11;
1080 denominator = growth;
1082 overall_growth = callee_info->growth;
1084 /* Look for inliner wrappers of the form:
1086 inline_caller ()
1088 do_fast_job...
1089 if (need_more_work)
1090 noninline_callee ();
 1092         Without penalizing this case, we usually inline noninline_callee
 1093         into the inline_caller because overall_growth is small, preventing
 1094         further inlining of inline_caller.
 1096         Penalize only callgraph edges to functions with small overall
 1097         growth ...  */
1099 if (growth > overall_growth
1100 /* ... and having only one caller which is not inlined ... */
1101 && callee_info->single_caller
1102 && !edge->caller->global.inlined_to
1103 /* ... and edges executed only conditionally ... */
1104 && edge->frequency < CGRAPH_FREQ_BASE
1105 /* ... consider case where callee is not inline but caller is ... */
1106 && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
1107 && DECL_DECLARED_INLINE_P (caller->decl))
1108 /* ... or when early optimizers decided to split and edge
1109 frequency still indicates splitting is a win ... */
1110 || (callee->split_part && !caller->split_part
1111 && edge->frequency
1112 < CGRAPH_FREQ_BASE
1113 * PARAM_VALUE
1114 (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100
1115 /* ... and do not overwrite user specified hints. */
1116 && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
1117 || DECL_DECLARED_INLINE_P (caller->decl)))))
1119 struct inline_summary *caller_info = inline_summaries->get (caller);
1120 int caller_growth = caller_info->growth;
 1122          /* Only apply the penalty when the caller looks like an inline candidate,
 1123             and it is not called once. */
1124 if (!caller_info->single_caller && overall_growth < caller_growth
1125 && caller_info->inlinable
1126 && caller_info->size
1127 < (DECL_DECLARED_INLINE_P (caller->decl)
1128 ? MAX_INLINE_INSNS_SINGLE : MAX_INLINE_INSNS_AUTO))
1130 if (dump)
1131 fprintf (dump_file,
1132 " Wrapper penalty. Increasing growth %i to %i\n",
1133 overall_growth, caller_growth);
1134 overall_growth = caller_growth;
1137 if (overall_growth > 0)
 1139          /* Strongly prefer functions with few callers that can be inlined
 1140             fully. The square root here leads to smaller binaries on average.
 1141             Watch however for extreme cases and return to a linear function
 1142             when growth is large. */
1143 if (overall_growth < 256)
1144 overall_growth *= overall_growth;
1145 else
1146 overall_growth += 256 * 256 - 256;
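	      /* At overall_growth == 256 both branches yield 65536, so the
		 squared and the linear regions join continuously.  */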
1147 denominator *= overall_growth;
1149 denominator *= inlined_time;
1151 badness = - numerator / denominator;
1153 if (dump)
1155 fprintf (dump_file,
1156 " %f: guessed profile. frequency %f, count %" PRId64
1157 " caller count %" PRId64
1158 " time w/o inlining %f, time with inlining %f"
1159 " overall growth %i (current) %i (original)"
1160 " %i (compensated)\n",
1161 badness.to_double (),
1162 (double)edge->frequency / CGRAPH_FREQ_BASE,
1163 edge->count, caller->count,
1164 compute_uninlined_call_time (edge,
1165 unspec_edge_time).to_double (),
1166 compute_inlined_call_time (edge, edge_time).to_double (),
1167 estimate_growth (callee),
1168 callee_info->growth, overall_growth);
 1171   /* When the function local profile is not available or it does not give
 1172      useful information (i.e. frequency is zero), base the cost on
 1173      loop nest and overall size growth, so we optimize for the overall number
 1174      of functions fully inlined in the program. */
1175 else
1177 int nest = MIN (ipa_call_summaries->get (edge)->loop_depth, 8);
1178 badness = growth;
1180 /* Decrease badness if call is nested. */
1181 if (badness > 0)
1182 badness = badness >> nest;
1183 else
1184 badness = badness << nest;
1185 if (dump)
1186 fprintf (dump_file, " %f: no profile. nest %i\n",
1187 badness.to_double (), nest);
1189 gcc_checking_assert (badness != 0);
1191 if (edge->recursive_p ())
1192 badness = badness.shift (badness > 0 ? 4 : -4);
1193 if ((hints & (INLINE_HINT_indirect_call
1194 | INLINE_HINT_loop_iterations
1195 | INLINE_HINT_array_index
1196 | INLINE_HINT_loop_stride))
1197 || callee_info->growth <= 0)
1198 badness = badness.shift (badness > 0 ? -2 : 2);
1199 if (hints & (INLINE_HINT_same_scc))
1200 badness = badness.shift (badness > 0 ? 3 : -3);
1201 else if (hints & (INLINE_HINT_in_scc))
1202 badness = badness.shift (badness > 0 ? 2 : -2);
1203 else if (hints & (INLINE_HINT_cross_module))
1204 badness = badness.shift (badness > 0 ? 1 : -1);
1205 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1206 badness = badness.shift (badness > 0 ? -4 : 4);
1207 else if ((hints & INLINE_HINT_declared_inline))
1208 badness = badness.shift (badness > 0 ? -3 : 3);
1209 if (dump)
1210 fprintf (dump_file, " Adjusted by hints %f\n", badness.to_double ());
1211 return badness;
1214 /* Recompute badness of EDGE and update its key in HEAP if needed. */
1215 static inline void
1216 update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
1218 sreal badness = edge_badness (edge, false);
1219 if (edge->aux)
1221 edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
1222 gcc_checking_assert (n->get_data () == edge);
1224 /* fibonacci_heap::replace_key does busy updating of the
 1225          heap that is unnecessarily expensive.
 1226          We do lazy increases: after extracting the minimum, if the key
 1227          turns out to be out of date, it is re-inserted into the heap
 1228          with the correct value. */
1229 if (badness < n->get_key ())
1231 if (dump_file && (dump_flags & TDF_DETAILS))
1233 fprintf (dump_file,
1234 " decreasing badness %s/%i -> %s/%i, %f"
1235 " to %f\n",
1236 xstrdup_for_dump (edge->caller->name ()),
1237 edge->caller->order,
1238 xstrdup_for_dump (edge->callee->name ()),
1239 edge->callee->order,
1240 n->get_key ().to_double (),
1241 badness.to_double ());
1243 heap->decrease_key (n, badness);
1246 else
1248 if (dump_file && (dump_flags & TDF_DETAILS))
1250 fprintf (dump_file,
1251 " enqueuing call %s/%i -> %s/%i, badness %f\n",
1252 xstrdup_for_dump (edge->caller->name ()),
1253 edge->caller->order,
1254 xstrdup_for_dump (edge->callee->name ()),
1255 edge->callee->order,
1256 badness.to_double ());
1258 edge->aux = heap->insert (badness, edge);
1263 /* NODE was inlined.
 1264    All caller edges need to be reset because
 1265    size estimates change. Similarly callees need to be reset
 1266    because a better context may be known. */
1268 static void
1269 reset_edge_caches (struct cgraph_node *node)
1271 struct cgraph_edge *edge;
1272 struct cgraph_edge *e = node->callees;
1273 struct cgraph_node *where = node;
1274 struct ipa_ref *ref;
1276 if (where->global.inlined_to)
1277 where = where->global.inlined_to;
1279 for (edge = where->callers; edge; edge = edge->next_caller)
1280 if (edge->inline_failed)
1281 reset_edge_growth_cache (edge);
1283 FOR_EACH_ALIAS (where, ref)
1284 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
1286 if (!e)
1287 return;
1289 while (true)
1290 if (!e->inline_failed && e->callee->callees)
1291 e = e->callee->callees;
1292 else
1294 if (e->inline_failed)
1295 reset_edge_growth_cache (e);
1296 if (e->next_callee)
1297 e = e->next_callee;
1298 else
1302 if (e->caller == node)
1303 return;
1304 e = e->caller->callers;
1306 while (!e->next_callee);
1307 e = e->next_callee;
 1312 /* Recompute HEAP nodes for each caller of NODE.
1313 UPDATED_NODES track nodes we already visited, to avoid redundant work.
1314 When CHECK_INLINABLITY_FOR is set, re-check for specified edge that
1315 it is inlinable. Otherwise check all edges. */
1317 static void
1318 update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
1319 bitmap updated_nodes,
1320 struct cgraph_edge *check_inlinablity_for)
1322 struct cgraph_edge *edge;
1323 struct ipa_ref *ref;
1325 if ((!node->alias && !inline_summaries->get (node)->inlinable)
1326 || node->global.inlined_to)
1327 return;
1328 if (!bitmap_set_bit (updated_nodes, node->uid))
1329 return;
1331 FOR_EACH_ALIAS (node, ref)
1333 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1334 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1337 for (edge = node->callers; edge; edge = edge->next_caller)
1338 if (edge->inline_failed)
1340 if (!check_inlinablity_for
1341 || check_inlinablity_for == edge)
1343 if (can_inline_edge_p (edge, false)
1344 && want_inline_small_function_p (edge, false))
1345 update_edge_key (heap, edge);
1346 else if (edge->aux)
1348 report_inline_failed_reason (edge);
1349 heap->delete_node ((edge_heap_node_t *) edge->aux);
1350 edge->aux = NULL;
1353 else if (edge->aux)
1354 update_edge_key (heap, edge);
1358 /* Recompute HEAP nodes for each uninlined call in NODE.
 1359    This is used when we know that edge badnesses are only going to increase
 1360    (we introduced a new call site) and thus all we need to do is insert newly
 1361    created edges into the heap. */
1363 static void
1364 update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
1365 bitmap updated_nodes)
1367 struct cgraph_edge *e = node->callees;
1369 if (!e)
1370 return;
1371 while (true)
1372 if (!e->inline_failed && e->callee->callees)
1373 e = e->callee->callees;
1374 else
1376 enum availability avail;
1377 struct cgraph_node *callee;
 1378         /* We do not reset the callee growth cache here. Since we added a new call,
 1379            growth could have just increased and consequently the badness metric
 1380            doesn't need updating. */
1381 if (e->inline_failed
1382 && (callee = e->callee->ultimate_alias_target (&avail, e->caller))
1383 && inline_summaries->get (callee)->inlinable
1384 && avail >= AVAIL_AVAILABLE
1385 && !bitmap_bit_p (updated_nodes, callee->uid))
1387 if (can_inline_edge_p (e, false)
1388 && want_inline_small_function_p (e, false))
1389 update_edge_key (heap, e);
1390 else if (e->aux)
1392 report_inline_failed_reason (e);
1393 heap->delete_node ((edge_heap_node_t *) e->aux);
1394 e->aux = NULL;
1397 if (e->next_callee)
1398 e = e->next_callee;
1399 else
1403 if (e->caller == node)
1404 return;
1405 e = e->caller->callers;
1407 while (!e->next_callee);
1408 e = e->next_callee;
 1413 /* Enqueue all recursive calls from NODE into the priority queue, keyed by
 1414    how much we want to recursively inline the call. */
1416 static void
1417 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1418 edge_heap_t *heap)
1420 struct cgraph_edge *e;
1421 enum availability avail;
1423 for (e = where->callees; e; e = e->next_callee)
1424 if (e->callee == node
1425 || (e->callee->ultimate_alias_target (&avail, e->caller) == node
1426 && avail > AVAIL_INTERPOSABLE))
1428 /* When profile feedback is available, prioritize by expected number
1429 of calls. */
1430 heap->insert (!max_count ? -e->frequency
1431 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
1434 for (e = where->callees; e; e = e->next_callee)
1435 if (!e->inline_failed)
1436 lookup_recursive_calls (node, e->callee, heap);
 1439 /* Decide on recursive inlining: in the case the function has recursive calls,
 1440    inline until the body size reaches the given argument. If any new indirect edges
1441 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1442 is NULL. */
1444 static bool
1445 recursive_inlining (struct cgraph_edge *edge,
1446 vec<cgraph_edge *> *new_edges)
1448 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1449 edge_heap_t heap (sreal::min ());
1450 struct cgraph_node *node;
1451 struct cgraph_edge *e;
1452 struct cgraph_node *master_clone = NULL, *next;
1453 int depth = 0;
1454 int n = 0;
1456 node = edge->caller;
1457 if (node->global.inlined_to)
1458 node = node->global.inlined_to;
1460 if (DECL_DECLARED_INLINE_P (node->decl))
1461 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1463 /* Make sure that function is small enough to be considered for inlining. */
1464 if (estimate_size_after_inlining (node, edge) >= limit)
1465 return false;
1466 lookup_recursive_calls (node, node, &heap);
1467 if (heap.empty ())
1468 return false;
1470 if (dump_file)
1471 fprintf (dump_file,
1472 " Performing recursive inlining on %s\n",
1473 node->name ());
 1475   /* Do the inlining and update the list of recursive calls during the process. */
1476 while (!heap.empty ())
1478 struct cgraph_edge *curr = heap.extract_min ();
1479 struct cgraph_node *cnode, *dest = curr->callee;
1481 if (!can_inline_edge_p (curr, true))
1482 continue;
 1484       /* MASTER_CLONE is produced in the case we already started modifying
 1485          the function. Be sure to redirect the edge to the original body before
 1486          estimating growths, otherwise we will be seeing growths after inlining
 1487          the already modified body. */
1488 if (master_clone)
1490 curr->redirect_callee (master_clone);
1491 reset_edge_growth_cache (curr);
1494 if (estimate_size_after_inlining (node, curr) > limit)
1496 curr->redirect_callee (dest);
1497 reset_edge_growth_cache (curr);
1498 break;
1501 depth = 1;
1502 for (cnode = curr->caller;
1503 cnode->global.inlined_to; cnode = cnode->callers->caller)
1504 if (node->decl
1505 == curr->callee->ultimate_alias_target ()->decl)
1506 depth++;
1508 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1510 curr->redirect_callee (dest);
1511 reset_edge_growth_cache (curr);
1512 continue;
1515 if (dump_file)
1517 fprintf (dump_file,
1518 " Inlining call of depth %i", depth);
1519 if (node->count)
1521 fprintf (dump_file, " called approx. %.2f times per call",
1522 (double)curr->count / node->count);
1524 fprintf (dump_file, "\n");
1526 if (!master_clone)
1528 /* We need original clone to copy around. */
1529 master_clone = node->create_clone (node->decl, node->count,
1530 CGRAPH_FREQ_BASE, false, vNULL,
1531 true, NULL, NULL);
1532 for (e = master_clone->callees; e; e = e->next_callee)
1533 if (!e->inline_failed)
1534 clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
1535 curr->redirect_callee (master_clone);
1536 reset_edge_growth_cache (curr);
1539 inline_call (curr, false, new_edges, &overall_size, true);
1540 lookup_recursive_calls (node, curr->callee, &heap);
1541 n++;
1544 if (!heap.empty () && dump_file)
1545 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1547 if (!master_clone)
1548 return false;
1550 if (dump_file)
1551 fprintf (dump_file,
1552 "\n Inlined %i times, "
1553 "body grown from size %i to %i, time %f to %f\n", n,
1554 inline_summaries->get (master_clone)->size,
1555 inline_summaries->get (node)->size,
1556 inline_summaries->get (master_clone)->time.to_double (),
1557 inline_summaries->get (node)->time.to_double ());
 1559   /* Remove the master clone we used for inlining. We rely on the fact that clones
 1560      inlined into the master clone get queued just before the master clone so we don't
 1561      need recursion. */
1562 for (node = symtab->first_function (); node != master_clone;
1563 node = next)
1565 next = symtab->next_function (node);
1566 if (node->global.inlined_to == master_clone)
1567 node->remove ();
1569 master_clone->remove ();
1570 return true;
1574 /* Given whole compilation unit estimate of INSNS, compute how large we can
1575 allow the unit to grow. */
1577 static int
1578 compute_max_insns (int insns)
1580 int max_insns = insns;
1581 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1582 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1584 return ((int64_t) max_insns
1585 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
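/* That is, the unit may grow to (1 + inline-unit-growth/100) times
   MAX (insns, large-unit-insns); e.g. with --param inline-unit-growth=20
   a unit of 100000 insns may grow to 120000.  */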
1589 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1591 static void
1592 add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
1594 while (new_edges.length () > 0)
1596 struct cgraph_edge *edge = new_edges.pop ();
1598 gcc_assert (!edge->aux);
1599 if (edge->inline_failed
1600 && can_inline_edge_p (edge, true)
1601 && want_inline_small_function_p (edge, true))
1602 edge->aux = heap->insert (edge_badness (edge, false), edge);
1606 /* Remove EDGE from the fibheap. */
1608 static void
1609 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1611 if (e->aux)
1613 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
1614 e->aux = NULL;
1618 /* Return true if speculation of edge E seems useful.
1619 If ANTICIPATE_INLINING is true, be conservative and hope that E
1620 may get inlined. */
1622 bool
1623 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1625 enum availability avail;
1626 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
1627 e->caller);
1628 struct cgraph_edge *direct, *indirect;
1629 struct ipa_ref *ref;
1631 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1633 if (!e->maybe_hot_p ())
1634 return false;
1636 /* See if IP optimizations found something potentially useful about the
1637 function. For now we look only for CONST/PURE flags. Almost everything
1638 else we propagate is useless. */
1639 if (avail >= AVAIL_AVAILABLE)
1641 int ecf_flags = flags_from_decl_or_type (target->decl);
1642 if (ecf_flags & ECF_CONST)
1644 e->speculative_call_info (direct, indirect, ref);
1645 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1646 return true;
1648 else if (ecf_flags & ECF_PURE)
1650 e->speculative_call_info (direct, indirect, ref);
1651 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1652 return true;
 1655   /* If we did not manage to inline the function nor redirect
 1656      to an ipa-cp clone (those are recognized by having the local flag set),
 1657      it is probably pointless to inline it unless the hardware is missing an
 1658      indirect call predictor. */
1659 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1660 return false;
1661 /* For overwritable targets there is not much to do. */
1662 if (e->inline_failed && !can_inline_edge_p (e, false, true))
1663 return false;
1664 /* OK, speculation seems interesting. */
1665 return true;
1668 /* We know that EDGE is not going to be inlined.
1669 See if we can remove speculation. */
1671 static void
1672 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1674 if (edge->speculative && !speculation_useful_p (edge, false))
1676 struct cgraph_node *node = edge->caller;
1677 struct cgraph_node *where = node->global.inlined_to
1678 ? node->global.inlined_to : node;
1679 auto_bitmap updated_nodes;
1681 spec_rem += edge->count;
1682 edge->resolve_speculation ();
1683 reset_edge_caches (where);
1684 inline_update_overall_summary (where);
1685 update_caller_keys (edge_heap, where,
1686 updated_nodes, NULL);
1687 update_callee_keys (edge_heap, where,
1688 updated_nodes);
 1692 /* Return true if NODE should be accounted for in the overall size estimate.
 1693    Skip all nodes optimized for size so we can measure the growth of the hot
 1694    part of the program regardless of the padding. */
1696 bool
1697 inline_account_function_p (struct cgraph_node *node)
1699 return (!DECL_EXTERNAL (node->decl)
1700 && !opt_for_fn (node->decl, optimize_size)
1701 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
 1704 /* Count the number of callers of NODE and store it into DATA (which
 1705    points to an int). Worker for cgraph_for_node_and_aliases. */
1707 static bool
1708 sum_callers (struct cgraph_node *node, void *data)
1710 struct cgraph_edge *e;
1711 int *num_calls = (int *)data;
1713 for (e = node->callers; e; e = e->next_caller)
1714 (*num_calls)++;
1715 return false;
 1718 /* We use a greedy algorithm for inlining of small functions:
 1719    All inline candidates are put into a prioritized heap ordered by
 1720    increasing badness.
 1722    The inlining of small functions is bounded by the unit growth parameters. */
1724 static void
1725 inline_small_functions (void)
1727 struct cgraph_node *node;
1728 struct cgraph_edge *edge;
1729 edge_heap_t edge_heap (sreal::min ());
1730 auto_bitmap updated_nodes;
1731 int min_size, max_size;
1732 auto_vec<cgraph_edge *> new_indirect_edges;
1733 int initial_size = 0;
1734 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1735 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1736 new_indirect_edges.create (8);
1738 edge_removal_hook_holder
1739 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1741 /* Compute overall unit size and other global parameters used by badness
1742 metrics. */
1744 max_count = 0;
1745 ipa_reduced_postorder (order, true, true, NULL);
1746 free (order);
1748 FOR_EACH_DEFINED_FUNCTION (node)
1749 if (!node->global.inlined_to)
1751 if (!node->alias && node->analyzed
1752 && (node->has_gimple_body_p () || node->thunk.thunk_p))
1754 struct inline_summary *info = inline_summaries->get (node);
1755 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1757 /* Do not account external functions; they will be optimized out
1758 if not inlined. Also only count the non-cold portion of the program. */
1759 if (inline_account_function_p (node))
1760 initial_size += info->size;
1761 info->growth = estimate_growth (node);
1763 int num_calls = 0;
1764 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
1765 true);
1766 if (num_calls == 1)
1767 info->single_caller = true;
1768 if (dfs && dfs->next_cycle)
1770 struct cgraph_node *n2;
1771 int id = dfs->scc_no + 1;
1772 for (n2 = node; n2;
1773 n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
1775 struct inline_summary *info2 = inline_summaries->get (n2);
1776 if (info2->scc_no)
1777 break;
1778 info2->scc_no = id;
1783 for (edge = node->callers; edge; edge = edge->next_caller)
1784 if (max_count < edge->count)
1785 max_count = edge->count;
1787 ipa_free_postorder_info ();
1788 initialize_growth_caches ();
1790 if (dump_file)
1791 fprintf (dump_file,
1792 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1793 initial_size);
1795 overall_size = initial_size;
1796 max_size = compute_max_insns (overall_size);
1797 min_size = overall_size;
1799 /* Populate the heap with all edges we might inline. */
1801 FOR_EACH_DEFINED_FUNCTION (node)
1803 bool update = false;
1804 struct cgraph_edge *next = NULL;
1805 bool has_speculative = false;
1807 if (dump_file)
1808 fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
1809 node->name (), node->order);
1811 for (edge = node->callees; edge; edge = next)
1813 next = edge->next_callee;
1814 if (edge->inline_failed
1815 && !edge->aux
1816 && can_inline_edge_p (edge, true)
1817 && want_inline_small_function_p (edge, true)
1818 && edge->inline_failed)
1820 gcc_assert (!edge->aux);
1821 update_edge_key (&edge_heap, edge);
1823 if (edge->speculative)
1824 has_speculative = true;
1826 if (has_speculative)
1827 for (edge = node->callees; edge; edge = next)
1828 if (edge->speculative && !speculation_useful_p (edge,
1829 edge->aux != NULL))
1831 edge->resolve_speculation ();
1832 update = true;
1834 if (update)
1836 struct cgraph_node *where = node->global.inlined_to
1837 ? node->global.inlined_to : node;
1838 inline_update_overall_summary (where);
1839 reset_edge_caches (where);
1840 update_caller_keys (&edge_heap, where,
1841 updated_nodes, NULL);
1842 update_callee_keys (&edge_heap, where,
1843 updated_nodes);
1844 bitmap_clear (updated_nodes);
1848 gcc_assert (in_lto_p
1849 || !max_count
1850 || (profile_info && flag_branch_probabilities));
1852 while (!edge_heap.empty ())
1854 int old_size = overall_size;
1855 struct cgraph_node *where, *callee;
1856 sreal badness = edge_heap.min_key ();
1857 sreal current_badness;
1858 int growth;
1860 edge = edge_heap.extract_min ();
1861 gcc_assert (edge->aux);
1862 edge->aux = NULL;
1863 if (!edge->inline_failed || !edge->callee->analyzed)
1864 continue;
1866 #if CHECKING_P
1867 /* Be sure that the caches are kept consistent. */
1868 sreal cached_badness = edge_badness (edge, false);
1870 int old_size_est = estimate_edge_size (edge);
1871 sreal old_time_est = estimate_edge_time (edge);
1872 int old_hints_est = estimate_edge_hints (edge);
1874 reset_edge_growth_cache (edge);
1875 gcc_assert (old_size_est == estimate_edge_size (edge));
1876 gcc_assert (old_time_est == estimate_edge_time (edge));
1877 /* FIXME:
1879 gcc_assert (old_hints_est == estimate_edge_hints (edge));
1881 fails with profile feedback because some hints depend on the
1882 maybe_hot_edge_p predicate, and because the callee gets inlined into
1883 other calls, the edge may become cold.
1884 This ought to be fixed by computing relative probabilities
1885 for the given invocation, but that will be better done once the whole
1886 code is converted to sreals. Disable for now and revert to the "wrong"
1887 value so the enable/disable checking paths agree. */
1888 edge_growth_cache[edge->uid].hints = old_hints_est + 1;
1890 /* When updating the edge costs, we only decrease badness in the keys.
1891 Increases of badness are handled lazily; when we see a key with an
1892 out-of-date value on it, we re-insert it now. */
1893 current_badness = edge_badness (edge, false);
1894 /* Disable checking for profile because roundoff errors may cause slight
1895 deviations in the order. */
1896 gcc_assert (max_count || cached_badness == current_badness);
1897 gcc_assert (current_badness >= badness);
1898 #else
1899 current_badness = edge_badness (edge, false);
1900 #endif
1901 if (current_badness != badness)
1903 if (edge_heap.min () && current_badness > edge_heap.min_key ())
1905 edge->aux = edge_heap.insert (current_badness, edge);
1906 continue;
1908 else
1909 badness = current_badness;
1912 if (!can_inline_edge_p (edge, true))
1914 resolve_noninline_speculation (&edge_heap, edge);
1915 continue;
1918 callee = edge->callee->ultimate_alias_target ();
1919 growth = estimate_edge_growth (edge);
1920 if (dump_file)
1922 fprintf (dump_file,
1923 "\nConsidering %s/%i with %i size\n",
1924 callee->name (), callee->order,
1925 inline_summaries->get (callee)->size);
1926 fprintf (dump_file,
1927 " to be inlined into %s/%i in %s:%i\n"
1928 " Estimated badness is %f, frequency %.2f.\n",
1929 edge->caller->name (), edge->caller->order,
1930 edge->call_stmt
1931 && (LOCATION_LOCUS (gimple_location ((const gimple *)
1932 edge->call_stmt))
1933 > BUILTINS_LOCATION)
1934 ? gimple_filename ((const gimple *) edge->call_stmt)
1935 : "unknown",
1936 edge->call_stmt
1937 ? gimple_lineno ((const gimple *) edge->call_stmt)
1938 : -1,
1939 badness.to_double (),
1940 edge->frequency / (double)CGRAPH_FREQ_BASE);
1941 if (edge->count)
1942 fprintf (dump_file," Called %" PRId64"x\n",
1943 edge->count);
1944 if (dump_flags & TDF_DETAILS)
1945 edge_badness (edge, true);
1948 if (overall_size + growth > max_size
1949 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1951 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1952 report_inline_failed_reason (edge);
1953 resolve_noninline_speculation (&edge_heap, edge);
1954 continue;
1957 if (!want_inline_small_function_p (edge, true))
1959 resolve_noninline_speculation (&edge_heap, edge);
1960 continue;
1963 /* Heuristics for inlining small functions work poorly for
1964 recursive calls, where inlining has an effect similar to loop unrolling.
1965 When inlining such an edge seems profitable, leave the decision to the
1966 special-purpose recursive inliner. */
1967 if (edge->recursive_p ())
1969 where = edge->caller;
1970 if (where->global.inlined_to)
1971 where = where->global.inlined_to;
1972 if (!recursive_inlining (edge,
1973 opt_for_fn (edge->caller->decl,
1974 flag_indirect_inlining)
1975 ? &new_indirect_edges : NULL))
1977 edge->inline_failed = CIF_RECURSIVE_INLINING;
1978 resolve_noninline_speculation (&edge_heap, edge);
1979 continue;
1981 reset_edge_caches (where);
1982 /* Recursive inliner inlines all recursive calls of the function
1983 at once. Consequently we need to update all callee keys. */
1984 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
1985 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
1986 update_callee_keys (&edge_heap, where, updated_nodes);
1987 bitmap_clear (updated_nodes);
1989 else
1991 struct cgraph_node *outer_node = NULL;
1992 int depth = 0;
1994 /* Consider the case where the self-recursive function A is inlined
1995 into B. This is a desirable optimization in some cases, since it
1996 has an effect similar to loop peeling and we might completely
1997 optimize out the recursive call. However we must be extra
1998 selective. */
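/* Purely as an illustration of the case described above (the function
   names are made up): when a self-recursive callee is called with a
   small constant argument, peeling a few levels of the recursion into
   the caller can expose the whole computation to constant folding.

     static int
     depth_sum (int n)
     {
       return n <= 0 ? 0 : n + depth_sum (n - 1);
     }

     int
     caller (void)
     {
       return depth_sum (2);
     }

   Peeling depth_sum twice into caller leaves no recursive call at all,
   which is why limited self-recursive inlining can be worthwhile.  */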
2000 where = edge->caller;
2001 while (where->global.inlined_to)
2003 if (where->decl == callee->decl)
2004 outer_node = where, depth++;
2005 where = where->callers->caller;
2007 if (outer_node
2008 && !want_inline_self_recursive_call_p (edge, outer_node,
2009 true, depth))
2011 edge->inline_failed
2012 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
2013 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
2014 resolve_noninline_speculation (&edge_heap, edge);
2015 continue;
2017 else if (depth && dump_file)
2018 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
2020 gcc_checking_assert (!callee->global.inlined_to);
2021 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
2022 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2024 reset_edge_caches (edge->callee);
2026 update_callee_keys (&edge_heap, where, updated_nodes);
2028 where = edge->caller;
2029 if (where->global.inlined_to)
2030 where = where->global.inlined_to;
2032 /* Our profitability metric can depend on local properties
2033 such as the number of inlinable calls and the size of the function body.
2034 After inlining these properties might change for the function we
2035 inlined into (since its body size changed) and for the functions
2036 called by the function we inlined (since the number of their inlinable
2037 callers might change). */
2038 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
2039 /* The offline copy count has possibly changed; recompute it if a profile
2040 is available. */
2041 if (max_count)
2043 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
2044 if (n != edge->callee && n->analyzed)
2045 update_callee_keys (&edge_heap, n, updated_nodes);
2047 bitmap_clear (updated_nodes);
2049 if (dump_file)
2051 fprintf (dump_file,
2052 " Inlined %s into %s which now has time %f and size %i, "
2053 "net change of %+i.\n",
2054 edge->callee->name (),
2055 edge->caller->name (),
2056 inline_summaries->get (edge->caller)->time.to_double (),
2057 inline_summaries->get (edge->caller)->size,
2058 overall_size - old_size);
2060 if (min_size > overall_size)
2062 min_size = overall_size;
2063 max_size = compute_max_insns (min_size);
2065 if (dump_file)
2066 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
2070 free_growth_caches ();
2071 if (dump_file)
2072 fprintf (dump_file,
2073 "Unit growth for small function inlining: %i->%i (%i%%)\n",
2074 initial_size, overall_size,
2075 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
2076 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
2079 /* Flatten NODE. Performed both during early inlining and
2080 at IPA inlining time. */
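/* A user-level illustration only (the function names are made up): the
   flatten attribute asks that every call in the body, and in the bodies
   inlined into it, be inlined recursively where possible.

     __attribute__ ((flatten)) void
     render_frame (void)
     {
       update_physics ();
       draw_scene ();
     }

   Here update_physics and draw_scene, and the calls inside them, end up
   inlined into render_frame; flatten_function below implements this walk
   on the callgraph.  */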
2082 static void
2083 flatten_function (struct cgraph_node *node, bool early)
2085 struct cgraph_edge *e;
2087 /* We shouldn't be called recursively when we are being processed. */
2088 gcc_assert (node->aux == NULL);
2090 node->aux = (void *) node;
2092 for (e = node->callees; e; e = e->next_callee)
2094 struct cgraph_node *orig_callee;
2095 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2097 /* We've hit a cycle? It is time to give up. */
2098 if (callee->aux)
2100 if (dump_file)
2101 fprintf (dump_file,
2102 "Not inlining %s into %s to avoid cycle.\n",
2103 xstrdup_for_dump (callee->name ()),
2104 xstrdup_for_dump (e->caller->name ()));
2105 e->inline_failed = CIF_RECURSIVE_INLINING;
2106 continue;
2109 /* When the edge is already inlined, we just need to recurse into
2110 it in order to fully flatten the leaves. */
2111 if (!e->inline_failed)
2113 flatten_function (callee, early);
2114 continue;
2117 /* The flatten attribute needs to be processed during late inlining. For
2118 extra code quality, however, we also do flattening during early
2119 optimization. */
2120 if (!early
2121 ? !can_inline_edge_p (e, true)
2122 : !can_early_inline_edge_p (e))
2123 continue;
2125 if (e->recursive_p ())
2127 if (dump_file)
2128 fprintf (dump_file, "Not inlining: recursive call.\n");
2129 continue;
2132 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2133 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2135 if (dump_file)
2136 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
2137 continue;
2140 /* Inline the edge and flatten the inline clone. Avoid
2141 recursing through the original node if the node was cloned. */
2142 if (dump_file)
2143 fprintf (dump_file, " Inlining %s into %s.\n",
2144 xstrdup_for_dump (callee->name ()),
2145 xstrdup_for_dump (e->caller->name ()));
2146 orig_callee = callee;
2147 inline_call (e, true, NULL, NULL, false);
2148 if (e->callee != orig_callee)
2149 orig_callee->aux = (void *) node;
2150 flatten_function (e->callee, early);
2151 if (e->callee != orig_callee)
2152 orig_callee->aux = NULL;
2155 node->aux = NULL;
2156 if (!node->global.inlined_to)
2157 inline_update_overall_summary (node);
2160 /* Inline NODE into all callers. Worker for cgraph_for_node_and_aliases.
2161 DATA points to the number of calls originally found, so we avoid infinite
2162 recursion. */
2164 static bool
2165 inline_to_all_callers_1 (struct cgraph_node *node, void *data,
2166 hash_set<cgraph_node *> *callers)
2168 int *num_calls = (int *)data;
2169 bool callee_removed = false;
2171 while (node->callers && !node->global.inlined_to)
2173 struct cgraph_node *caller = node->callers->caller;
2175 if (!can_inline_edge_p (node->callers, true)
2176 || node->callers->recursive_p ())
2178 if (dump_file)
2179 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2180 *num_calls = 0;
2181 return false;
2184 if (dump_file)
2186 fprintf (dump_file,
2187 "\nInlining %s size %i.\n",
2188 node->name (),
2189 inline_summaries->get (node)->size);
2190 fprintf (dump_file,
2191 " Called once from %s %i insns.\n",
2192 node->callers->caller->name (),
2193 inline_summaries->get (node->callers->caller)->size);
2196 /* Remember which callers we inlined into, delaying the update of the
2197 overall summary. */
2198 callers->add (node->callers->caller);
2199 inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
2200 if (dump_file)
2201 fprintf (dump_file,
2202 " Inlined into %s which now has %i size\n",
2203 caller->name (),
2204 inline_summaries->get (caller)->size);
2205 if (!(*num_calls)--)
2207 if (dump_file)
2208 fprintf (dump_file, "New calls found; giving up.\n");
2209 return callee_removed;
2211 if (callee_removed)
2212 return true;
2214 return false;
2217 /* Wrapper around inline_to_all_callers_1 doing delayed overall summary
2218 update. */
2220 static bool
2221 inline_to_all_callers (struct cgraph_node *node, void *data)
2223 hash_set<cgraph_node *> callers;
2224 bool res = inline_to_all_callers_1 (node, data, &callers);
2225 /* Perform the delayed update of the overall summary of all callers
2226 processed. This avoids quadratic behavior in the cases where
2227 we have a lot of calls to the same function. */
2228 for (hash_set<cgraph_node *>::iterator i = callers.begin ();
2229 i != callers.end (); ++i)
2230 inline_update_overall_summary (*i);
2231 return res;
2234 /* Output overall time estimate. */
2235 static void
2236 dump_overall_stats (void)
2238 sreal sum_weighted = 0, sum = 0;
2239 struct cgraph_node *node;
2241 FOR_EACH_DEFINED_FUNCTION (node)
2242 if (!node->global.inlined_to
2243 && !node->alias)
2245 sreal time = inline_summaries->get (node)->time;
2246 sum += time;
2247 sum_weighted += time * node->count;
2249 fprintf (dump_file, "Overall time estimate: "
2250 "%f weighted by profile: "
2251 "%f\n", sum.to_double (), sum_weighted.to_double ());
2254 /* Output some useful stats about inlining. */
2256 static void
2257 dump_inline_stats (void)
2259 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2260 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2261 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2262 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2263 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2264 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2265 int64_t reason[CIF_N_REASONS][3];
2266 int i;
2267 struct cgraph_node *node;
2269 memset (reason, 0, sizeof (reason));
2270 FOR_EACH_DEFINED_FUNCTION (node)
2272 struct cgraph_edge *e;
2273 for (e = node->callees; e; e = e->next_callee)
2275 if (e->inline_failed)
2277 reason[(int) e->inline_failed][0] += e->count;
2278 reason[(int) e->inline_failed][1] += e->frequency;
2279 reason[(int) e->inline_failed][2] ++;
2280 if (DECL_VIRTUAL_P (e->callee->decl))
2282 if (e->indirect_inlining_edge)
2283 noninlined_virt_indir_cnt += e->count;
2284 else
2285 noninlined_virt_cnt += e->count;
2287 else
2289 if (e->indirect_inlining_edge)
2290 noninlined_indir_cnt += e->count;
2291 else
2292 noninlined_cnt += e->count;
2295 else
2297 if (e->speculative)
2299 if (DECL_VIRTUAL_P (e->callee->decl))
2300 inlined_speculative_ply += e->count;
2301 else
2302 inlined_speculative += e->count;
2304 else if (DECL_VIRTUAL_P (e->callee->decl))
2306 if (e->indirect_inlining_edge)
2307 inlined_virt_indir_cnt += e->count;
2308 else
2309 inlined_virt_cnt += e->count;
2311 else
2313 if (e->indirect_inlining_edge)
2314 inlined_indir_cnt += e->count;
2315 else
2316 inlined_cnt += e->count;
2320 for (e = node->indirect_calls; e; e = e->next_callee)
2321 if (e->indirect_info->polymorphic)
2322 indirect_poly_cnt += e->count;
2323 else
2324 indirect_cnt += e->count;
2326 if (max_count)
2328 fprintf (dump_file,
2329 "Inlined %" PRId64 " + speculative "
2330 "%" PRId64 " + speculative polymorphic "
2331 "%" PRId64 " + previously indirect "
2332 "%" PRId64 " + virtual "
2333 "%" PRId64 " + virtual and previously indirect "
2334 "%" PRId64 "\n" "Not inlined "
2335 "%" PRId64 " + previously indirect "
2336 "%" PRId64 " + virtual "
2337 "%" PRId64 " + virtual and previously indirect "
2338 "%" PRId64 " + stil indirect "
2339 "%" PRId64 " + still indirect polymorphic "
2340 "%" PRId64 "\n", inlined_cnt,
2341 inlined_speculative, inlined_speculative_ply,
2342 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2343 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2344 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2345 fprintf (dump_file,
2346 "Removed speculations %" PRId64 "\n",
2347 spec_rem);
2349 dump_overall_stats ();
2350 fprintf (dump_file, "\nWhy inlining failed?\n");
2351 for (i = 0; i < CIF_N_REASONS; i++)
2352 if (reason[i][2])
2353 fprintf (dump_file, "%-50s: %8i calls, %8i freq, %" PRId64" count\n",
2354 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2355 (int) reason[i][2], (int) reason[i][1], reason[i][0]);
2358 /* Decide on the inlining. We do so in topological order to avoid
2359 the expense of updating data structures. */
2361 static unsigned int
2362 ipa_inline (void)
2364 struct cgraph_node *node;
2365 int nnodes;
2366 struct cgraph_node **order;
2367 int i;
2368 int cold;
2369 bool remove_functions = false;
2371 if (!optimize)
2372 return 0;
2374 cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
2375 percent_rec = (sreal) 1 / (sreal) 100;
2377 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2379 if (dump_file)
2380 dump_inline_summaries (dump_file);
2382 nnodes = ipa_reverse_postorder (order);
2384 FOR_EACH_FUNCTION (node)
2386 node->aux = 0;
2388 /* Recompute the default reasons for inlining because they may have
2389 changed during merging. */
2390 if (in_lto_p)
2392 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2394 gcc_assert (e->inline_failed);
2395 initialize_inline_failed (e);
2397 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2398 initialize_inline_failed (e);
2402 if (dump_file)
2403 fprintf (dump_file, "\nFlattening functions:\n");
2405 /* In the first pass, handle functions to be flattened. Do this with
2406 priority so that none of our later choices makes this impossible. */
2407 for (i = nnodes - 1; i >= 0; i--)
2409 node = order[i];
2411 /* Handle nodes to be flattened.
2412 Ideally, when processing callees, we would stop inlining at the
2413 entry of cycles, possibly cloning that entry point and
2414 trying to flatten it, turning it into a self-recursive
2415 function. */
2416 if (lookup_attribute ("flatten",
2417 DECL_ATTRIBUTES (node->decl)) != NULL)
2419 if (dump_file)
2420 fprintf (dump_file,
2421 "Flattening %s\n", node->name ());
2422 flatten_function (node, false);
2425 if (dump_file)
2426 dump_overall_stats ();
2428 inline_small_functions ();
2430 gcc_assert (symtab->state == IPA_SSA);
2431 symtab->state = IPA_SSA_AFTER_INLINING;
2432 /* Do first after-inlining removal. We want to remove all "stale" extern
2433 inline functions and virtual functions so we really know what is called
2434 once. */
2435 symtab->remove_unreachable_nodes (dump_file);
2436 free (order);
2438 /* Inline functions with the property that after inlining into all callers
2439 the code size will shrink because the out-of-line copy is eliminated.
2440 We do this regardless of the callee size as long as the function growth
2441 limits are met. */
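/* A minimal illustration of such a function (hypothetical user code):

     static int
     scale (int x)
     {
       return x * 16 + 3;
     }

     int
     use (int v)
     {
       return scale (v);
     }

   scale is static and has a single caller, so inlining it into use lets
   the out-of-line copy be removed and the unit shrinks even though the
   call site itself grows a little.  */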
2442 if (dump_file)
2443 fprintf (dump_file,
2444 "\nDeciding on functions to be inlined into all callers and "
2445 "removing useless speculations:\n");
2447 /* Inlining one function called once has a good chance of preventing
2448 the inlining of another function into the same caller. Ideally we should
2449 work in priority order, but inlining hot functions first is probably
2450 a good cut without the extra pain of maintaining the queue.
2452 ??? This does not really fit the bill perfectly: inlining a function
2453 into its caller often leads to better optimization of the callee's body
2454 due to the increased context for optimization.
2455 For example, if main() calls a function that outputs help
2456 and then a function that does the main optimization, we should inline
2457 the second with priority even if both calls are cold by themselves.
2459 We probably want to implement a new predicate replacing our use of
2460 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2461 to be hot. */
2462 for (cold = 0; cold <= 1; cold ++)
2464 FOR_EACH_DEFINED_FUNCTION (node)
2466 struct cgraph_edge *edge, *next;
2467 bool update=false;
2469 for (edge = node->callees; edge; edge = next)
2471 next = edge->next_callee;
2472 if (edge->speculative && !speculation_useful_p (edge, false))
2474 edge->resolve_speculation ();
2475 spec_rem += edge->count;
2476 update = true;
2477 remove_functions = true;
2480 if (update)
2482 struct cgraph_node *where = node->global.inlined_to
2483 ? node->global.inlined_to : node;
2484 reset_edge_caches (where);
2485 inline_update_overall_summary (where);
2487 if (want_inline_function_to_all_callers_p (node, cold))
2489 int num_calls = 0;
2490 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2491 true);
2492 while (node->call_for_symbol_and_aliases
2493 (inline_to_all_callers, &num_calls, true))
2495 remove_functions = true;
2500 /* Free ipa-prop structures if they are no longer needed. */
2501 if (optimize)
2502 ipa_free_all_structures_after_iinln ();
2504 if (dump_file)
2506 fprintf (dump_file,
2507 "\nInlined %i calls, eliminated %i functions\n\n",
2508 ncalls_inlined, nfunctions_inlined);
2509 dump_inline_stats ();
2512 if (dump_file)
2513 dump_inline_summaries (dump_file);
2514 /* In WPA we use inline summaries for the partitioning process. */
2515 if (!flag_wpa)
2516 inline_free_summary ();
2517 return remove_functions ? TODO_remove_functions : 0;
2520 /* Inline always-inline function calls in NODE. */
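/* For illustration (hypothetical user code, not part of GCC): calls to
   functions declared like

     static inline int __attribute__ ((always_inline))
     twice (int x)
     {
       return 2 * x;
     }

   must be inlined regardless of optimization level and size heuristics,
   which is why they are handled here separately; a failure to inline
   them is diagnosed later in expand_call_inline in tree-inline.c.  */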
2522 static bool
2523 inline_always_inline_functions (struct cgraph_node *node)
2525 struct cgraph_edge *e;
2526 bool inlined = false;
2528 for (e = node->callees; e; e = e->next_callee)
2530 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2531 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2532 continue;
2534 if (e->recursive_p ())
2536 if (dump_file)
2537 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2538 e->callee->name ());
2539 e->inline_failed = CIF_RECURSIVE_INLINING;
2540 continue;
2543 if (!can_early_inline_edge_p (e))
2545 /* Set inlined to true if the callee is marked "always_inline" but
2546 is not inlinable. This will allow flagging an error later in
2547 expand_call_inline in tree-inline.c. */
2548 if (lookup_attribute ("always_inline",
2549 DECL_ATTRIBUTES (callee->decl)) != NULL)
2550 inlined = true;
2551 continue;
2554 if (dump_file)
2555 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2556 xstrdup_for_dump (e->callee->name ()),
2557 xstrdup_for_dump (e->caller->name ()));
2558 inline_call (e, true, NULL, NULL, false);
2559 inlined = true;
2561 if (inlined)
2562 inline_update_overall_summary (node);
2564 return inlined;
2567 /* Decide on the inlining. We do so in topological order to avoid
2568 the expense of updating data structures. */
2570 static bool
2571 early_inline_small_functions (struct cgraph_node *node)
2573 struct cgraph_edge *e;
2574 bool inlined = false;
2576 for (e = node->callees; e; e = e->next_callee)
2578 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2579 if (!inline_summaries->get (callee)->inlinable
2580 || !e->inline_failed)
2581 continue;
2583 /* Do not consider functions not declared inline. */
2584 if (!DECL_DECLARED_INLINE_P (callee->decl)
2585 && !opt_for_fn (node->decl, flag_inline_small_functions)
2586 && !opt_for_fn (node->decl, flag_inline_functions))
2587 continue;
2589 if (dump_file)
2590 fprintf (dump_file, "Considering inline candidate %s.\n",
2591 callee->name ());
2593 if (!can_early_inline_edge_p (e))
2594 continue;
2596 if (e->recursive_p ())
2598 if (dump_file)
2599 fprintf (dump_file, " Not inlining: recursive call.\n");
2600 continue;
2603 if (!want_early_inline_function_p (e))
2604 continue;
2606 if (dump_file)
2607 fprintf (dump_file, " Inlining %s into %s.\n",
2608 xstrdup_for_dump (callee->name ()),
2609 xstrdup_for_dump (e->caller->name ()));
2610 inline_call (e, true, NULL, NULL, false);
2611 inlined = true;
2614 if (inlined)
2615 inline_update_overall_summary (node);
2617 return inlined;
2620 unsigned int
2621 early_inliner (function *fun)
2623 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2624 struct cgraph_edge *edge;
2625 unsigned int todo = 0;
2626 int iterations = 0;
2627 bool inlined = false;
2629 if (seen_error ())
2630 return 0;
2632 /* Do nothing if the data structures for the IPA inliner are already
2633 computed. This happens when some pass decides to construct a new function
2634 and cgraph_add_new_function calls lowering passes and early optimization on
2635 it. This may confuse us when the early inliner decides to inline a call to
2636 a function clone, because function clones don't have a parameter list in
2637 ipa-prop matching their signature. */
2638 if (ipa_node_params_sum)
2639 return 0;
2641 if (flag_checking)
2642 node->verify ();
2643 node->remove_all_references ();
2645 /* Rebuild this reference because it doesn't depend on the
2646 function's body and it is required to pass cgraph_node
2647 verification. */
2648 if (node->instrumented_version
2649 && !node->instrumentation_clone)
2650 node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
2652 /* Even when not optimizing or not inlining, inline always-inline
2653 functions. */
2654 inlined = inline_always_inline_functions (node);
2656 if (!optimize
2657 || flag_no_inline
2658 || !flag_early_inlining
2659 /* Never inline regular functions into always-inline functions
2660 during incremental inlining. This is unfortunate, as functions calling
2661 always-inline functions will get less optimized, but at the
2662 same time inlining functions that call an always-inline
2663 function into an always-inline function might introduce
2664 cycles of edges to be always inlined in the callgraph.
2666 We might want to be smarter and just avoid this type of inlining. */
2667 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2668 && lookup_attribute ("always_inline",
2669 DECL_ATTRIBUTES (node->decl))))
2671 else if (lookup_attribute ("flatten",
2672 DECL_ATTRIBUTES (node->decl)) != NULL)
2674 /* When the function is marked to be flattened, recursively inline
2675 all calls in it. */
2676 if (dump_file)
2677 fprintf (dump_file,
2678 "Flattening %s\n", node->name ());
2679 flatten_function (node, true);
2680 inlined = true;
2682 else
2684 /* If some always_inline functions were inlined, apply the changes.
2685 This way we will not account always-inline functions against the growth
2686 limits and moreover we will inline calls from always-inline functions
2687 that we previously skipped because of the conditional above. */
2688 if (inlined)
2690 timevar_push (TV_INTEGRATION);
2691 todo |= optimize_inline_calls (current_function_decl);
2692 /* optimize_inline_calls call above might have introduced new
2693 statements that don't have inline parameters computed. */
2694 for (edge = node->callees; edge; edge = edge->next_callee)
2696 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
2697 es->call_stmt_size
2698 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2699 es->call_stmt_time
2700 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2702 inline_update_overall_summary (node);
2703 inlined = false;
2704 timevar_pop (TV_INTEGRATION);
2706 /* We iterate incremental inlining to get trivial cases of indirect
2707 inlining. */
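/* An illustration of the indirect-inlining case these iterations catch
   (hypothetical code): once apply is inlined into caller, the call
   through F becomes a direct call to work, which the next iteration can
   inline as well.

     static int work (int x) { return x + 1; }

     static int
     apply (int (*f) (int), int x)
     {
       return f (x);
     }

     int
     caller (int x)
     {
       return apply (work, x);
     }
*/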
2708 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2709 && early_inline_small_functions (node))
2711 timevar_push (TV_INTEGRATION);
2712 todo |= optimize_inline_calls (current_function_decl);
2714 /* Technically we ought to recompute the inline parameters so the new
2715 iteration of the early inliner works as expected. However, the values we
2716 have are approximately right, and thus we only need to update the edge
2717 info that might be cleared out for newly discovered edges. */
2718 for (edge = node->callees; edge; edge = edge->next_callee)
2720 /* We have no summary for new bound store calls yet. */
2721 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
2722 es->call_stmt_size
2723 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2724 es->call_stmt_time
2725 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2727 if (edge->callee->decl
2728 && !gimple_check_call_matching_types (
2729 edge->call_stmt, edge->callee->decl, false))
2731 edge->inline_failed = CIF_MISMATCHED_ARGUMENTS;
2732 edge->call_stmt_cannot_inline_p = true;
2735 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2736 inline_update_overall_summary (node);
2737 timevar_pop (TV_INTEGRATION);
2738 iterations++;
2739 inlined = false;
2741 if (dump_file)
2742 fprintf (dump_file, "Iterations: %i\n", iterations);
2745 if (inlined)
2747 timevar_push (TV_INTEGRATION);
2748 todo |= optimize_inline_calls (current_function_decl);
2749 timevar_pop (TV_INTEGRATION);
2752 fun->always_inline_functions_inlined = true;
2754 return todo;
2757 /* Do inlining of small functions. Doing so early helps profiling and other
2758 passes to be somewhat more effective and avoids some code duplication in
2759 later real inlining pass for testcases with very many function calls. */
2761 namespace {
2763 const pass_data pass_data_early_inline =
2765 GIMPLE_PASS, /* type */
2766 "einline", /* name */
2767 OPTGROUP_INLINE, /* optinfo_flags */
2768 TV_EARLY_INLINING, /* tv_id */
2769 PROP_ssa, /* properties_required */
2770 0, /* properties_provided */
2771 0, /* properties_destroyed */
2772 0, /* todo_flags_start */
2773 0, /* todo_flags_finish */
2776 class pass_early_inline : public gimple_opt_pass
2778 public:
2779 pass_early_inline (gcc::context *ctxt)
2780 : gimple_opt_pass (pass_data_early_inline, ctxt)
2783 /* opt_pass methods: */
2784 virtual unsigned int execute (function *);
2786 }; // class pass_early_inline
2788 unsigned int
2789 pass_early_inline::execute (function *fun)
2791 return early_inliner (fun);
2794 } // anon namespace
2796 gimple_opt_pass *
2797 make_pass_early_inline (gcc::context *ctxt)
2799 return new pass_early_inline (ctxt);
2802 namespace {
2804 const pass_data pass_data_ipa_inline =
2806 IPA_PASS, /* type */
2807 "inline", /* name */
2808 OPTGROUP_INLINE, /* optinfo_flags */
2809 TV_IPA_INLINING, /* tv_id */
2810 0, /* properties_required */
2811 0, /* properties_provided */
2812 0, /* properties_destroyed */
2813 0, /* todo_flags_start */
2814 ( TODO_dump_symtab ), /* todo_flags_finish */
2817 class pass_ipa_inline : public ipa_opt_pass_d
2819 public:
2820 pass_ipa_inline (gcc::context *ctxt)
2821 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2822 inline_generate_summary, /* generate_summary */
2823 inline_write_summary, /* write_summary */
2824 inline_read_summary, /* read_summary */
2825 NULL, /* write_optimization_summary */
2826 NULL, /* read_optimization_summary */
2827 NULL, /* stmt_fixup */
2828 0, /* function_transform_todo_flags_start */
2829 inline_transform, /* function_transform */
2830 NULL) /* variable_transform */
2833 /* opt_pass methods: */
2834 virtual unsigned int execute (function *) { return ipa_inline (); }
2836 }; // class pass_ipa_inline
2838 } // anon namespace
2840 ipa_opt_pass_d *
2841 make_pass_ipa_inline (gcc::context *ctxt)
2843 return new pass_ipa_inline (ctxt);