/* Inlining decision heuristics.
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, growth and
     so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   inlining heuristics

     The inliner itself is split into two passes:

     pass_early_inlining

       Simple local inlining pass inlining callees into the current function.
       This pass makes no use of whole unit analysis and thus it can make only
       very simple decisions based on local properties.

       The strength of the pass is that it is run in topological order
       (reverse postorder) on the callgraph.  Functions are converted into SSA
       form just before this pass and optimized subsequently.  As a result, the
       callees of the function seen by the early inliner were already optimized
       and the results of early inlining add a lot of optimization opportunities
       for the local optimizers.

       The pass handles the obvious inlining decisions within the compilation
       unit - inlining auto inline functions, inlining for size and
       flattening.

       The main strength of the pass is the ability to eliminate the abstraction
       penalty in C++ code (via a combination of inlining and early
       optimization) and thus improve the quality of analysis done by the real
       IPA optimizers.

       Because of the lack of whole unit knowledge, the pass cannot really make
       good code size/performance tradeoffs.  It however does very simple
       speculative inlining allowing code size to grow by
       EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
       the optimizations performed later are very likely to eliminate the cost.

     pass_ipa_inline

       This is the real inliner able to handle inlining with whole program
       knowledge.  It performs the following steps:

       1) inlining of small functions.  This is implemented by a greedy
       algorithm ordering all inlinable cgraph edges by their badness and
       inlining them in this order as long as the inline limits allow doing so.

       This heuristic is not very good at inlining recursive calls.  Recursive
       calls can be inlined with results similar to loop unrolling.  To do so,
       a special purpose recursive inliner is executed on the function when a
       recursive edge is met as a viable candidate.

       2) Unreachable functions are removed from the callgraph.  Inlining leads
       to devirtualization and other modification of the callgraph, so functions
       may become unreachable during the process.  Also functions declared as
       extern inline or virtual functions are removed, since after inlining
       we no longer need the offline bodies.

       3) Functions called once and not exported from the unit are inlined.
       This should almost always lead to a reduction of code size by eliminating
       the need for an offline copy of the function.  */
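
/* Illustration (not part of the heuristics themselves): the kind of
   abstraction penalty the early inliner removes.  With a hypothetical C++
   accessor

       int get_x () const { return x; }

   called inside a loop, pass_early_inlining replaces the call by a direct
   field load, so the local optimizers and later pass_ipa_inline see realistic
   size and time estimates instead of an opaque call.  */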
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "trans-mem.h"
#include "calls.h"
#include "tree-inline.h"
#include "params.h"
#include "profile.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "auto-profile.h"
#include "builtins.h"
#include "fibonacci_heap.h"

typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;

/* Statistics we collect about the inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
static gcov_type spec_rem;

/* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100.  */
static sreal cgraph_freq_base_rec, percent_rec;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into a tiny wrapper, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the caller.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
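
/* Worked illustration of the two limits checked below (hypothetical numbers,
   not the actual parameter defaults): if the largest body on the inline chain
   is 1000 insns and --param large-function-growth is 100 (percent), the body
   may grow up to 2000 insns (and must also exceed --param large-function-insns)
   before CIF_LARGE_FUNCTION_GROWTH_LIMIT triggers.  Likewise, with an own
   stack frame of 4000 bytes and --param stack-frame-growth of 1000 (percent),
   the combined stack usage may reach 4000 + 4000 * 1000 / 100 = 44000 bytes;
   the limit check only fires when the combined stack also exceeds --param
   large-stack-frame.  */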
144 static bool
145 caller_growth_limits (struct cgraph_edge *e)
147 struct cgraph_node *to = e->caller;
148 struct cgraph_node *what = e->callee->ultimate_alias_target ();
149 int newsize;
150 int limit = 0;
151 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
152 inline_summary *info, *what_info, *outer_info = inline_summaries->get (to);
  /* Look for the function e->caller is inlined into.  While doing
     so, work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that: not on the self size of the
     outer function and not on the self size of the inline code
     we immediately inline into.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent the compiler from exploding".  */
162 while (true)
164 info = inline_summaries->get (to);
165 if (limit < info->self_size)
166 limit = info->self_size;
167 if (stack_size_limit < info->estimated_self_stack_size)
168 stack_size_limit = info->estimated_self_stack_size;
169 if (to->global.inlined_to)
170 to = to->callers->caller;
171 else
172 break;
175 what_info = inline_summaries->get (what);
177 if (limit < what_info->self_size)
178 limit = what_info->self_size;
180 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
182 /* Check the size after inlining against the function limits. But allow
183 the function to shrink if it went over the limits by forced inlining. */
184 newsize = estimate_size_after_inlining (to, e);
185 if (newsize >= info->size
186 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
187 && newsize > limit)
189 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
190 return false;
193 if (!what_info->estimated_stack_size)
194 return true;
  /* FIXME: The stack size limit often prevents inlining in Fortran programs
     due to large i/o data structures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
201 stack_size_limit += ((gcov_type)stack_size_limit
202 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
204 inlined_stack = (outer_info->stack_frame_offset
205 + outer_info->estimated_self_stack_size
206 + what_info->estimated_stack_size);
207 /* Check new stack consumption with stack consumption at the place
208 stack is used. */
209 if (inlined_stack > stack_size_limit
      /* If the function already has large stack usage from a sibling
         inline call, we can inline, too.
         This bit overoptimistically assumes that we are good at stack
         packing.  */
214 && inlined_stack > info->estimated_stack_size
215 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
217 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
218 return false;
220 return true;
223 /* Dump info about why inlining has failed. */
225 static void
226 report_inline_failed_reason (struct cgraph_edge *e)
228 if (dump_file)
230 fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
231 xstrdup_for_dump (e->caller->name ()), e->caller->order,
232 xstrdup_for_dump (e->callee->name ()), e->callee->order,
233 cgraph_inline_failed_string (e->inline_failed));
234 if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
235 || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
236 && e->caller->lto_file_data
237 && e->callee->ultimate_alias_target ()->lto_file_data)
239 fprintf (dump_file, " LTO objects: %s, %s\n",
240 e->caller->lto_file_data->file_name,
241 e->callee->ultimate_alias_target ()->lto_file_data->file_name);
243 if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
244 cl_target_option_print_diff
245 (dump_file, 2, target_opts_for_fn (e->caller->decl),
246 target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
247 if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
248 cl_optimization_print_diff
249 (dump_file, 2, opts_for_fn (e->caller->decl),
250 opts_for_fn (e->callee->ultimate_alias_target ()->decl));
254 /* Decide whether sanitizer-related attributes allow inlining. */
256 static bool
257 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
259 /* Don't care if sanitizer is disabled */
260 if (!(flag_sanitize & SANITIZE_ADDRESS))
261 return true;
263 if (!caller || !callee)
264 return true;
266 return !!lookup_attribute ("no_sanitize_address",
267 DECL_ATTRIBUTES (caller)) ==
268 !!lookup_attribute ("no_sanitize_address",
269 DECL_ATTRIBUTES (callee));
/* Used for flags where it is safe to inline when the caller's value is
   greater than the callee's.  */
274 #define check_maybe_up(flag) \
275 (opts_for_fn (caller->decl)->x_##flag \
276 != opts_for_fn (callee->decl)->x_##flag \
277 && (!always_inline \
278 || opts_for_fn (caller->decl)->x_##flag \
279 < opts_for_fn (callee->decl)->x_##flag))
280 /* Used for flags where it is safe to inline when caller's value is
281 smaller than callee's. */
282 #define check_maybe_down(flag) \
283 (opts_for_fn (caller->decl)->x_##flag \
284 != opts_for_fn (callee->decl)->x_##flag \
285 && (!always_inline \
286 || opts_for_fn (caller->decl)->x_##flag \
287 > opts_for_fn (callee->decl)->x_##flag))
288 /* Used for flags where exact match is needed for correctness. */
289 #define check_match(flag) \
290 (opts_for_fn (caller->decl)->x_##flag \
291 != opts_for_fn (callee->decl)->x_##flag)
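
/* Mechanically (illustration, not an additional rule): for a non-always_inline
   callee any difference in the checked flag counts as a mismatch; for an
   always_inline callee, check_maybe_up tolerates the difference only when the
   caller's value is the larger one (and check_maybe_down only when it is the
   smaller one).  E.g. hypothetically, an always_inline callee built with
   -fno-trapping-math passes check_maybe_up (flag_trapping_math) when inlined
   into a caller built with -ftrapping-math, but not vice versa.  */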
/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */
302 static bool
303 can_inline_edge_p (struct cgraph_edge *e, bool report,
304 bool disregard_limits = false, bool early = false)
306 gcc_checking_assert (e->inline_failed);
308 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
310 if (report)
311 report_inline_failed_reason (e);
312 return false;
315 bool inlinable = true;
316 enum availability avail;
317 cgraph_node *caller = e->caller->global.inlined_to
318 ? e->caller->global.inlined_to : e->caller;
319 cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
320 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
321 tree callee_tree
322 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
324 if (!callee->definition)
326 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
327 inlinable = false;
329 else if (callee->calls_comdat_local)
331 e->inline_failed = CIF_USES_COMDAT_LOCAL;
332 inlinable = false;
334 else if (avail <= AVAIL_INTERPOSABLE)
336 e->inline_failed = CIF_OVERWRITABLE;
337 inlinable = false;
339 /* All edges with call_stmt_cannot_inline_p should have inline_failed
340 initialized to one of FINAL_ERROR reasons. */
341 else if (e->call_stmt_cannot_inline_p)
342 gcc_unreachable ();
343 /* Don't inline if the functions have different EH personalities. */
344 else if (DECL_FUNCTION_PERSONALITY (caller->decl)
345 && DECL_FUNCTION_PERSONALITY (callee->decl)
346 && (DECL_FUNCTION_PERSONALITY (caller->decl)
347 != DECL_FUNCTION_PERSONALITY (callee->decl)))
349 e->inline_failed = CIF_EH_PERSONALITY;
350 inlinable = false;
352 /* TM pure functions should not be inlined into non-TM_pure
353 functions. */
354 else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
356 e->inline_failed = CIF_UNSPECIFIED;
357 inlinable = false;
359 /* Check compatibility of target optimization options. */
360 else if (!targetm.target_option.can_inline_p (caller->decl,
361 callee->decl))
363 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
364 inlinable = false;
366 else if (!inline_summaries->get (callee)->inlinable)
368 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
369 inlinable = false;
371 /* Don't inline a function with mismatched sanitization attributes. */
372 else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
374 e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
375 inlinable = false;
377 /* Check if caller growth allows the inlining. */
378 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
379 && !disregard_limits
380 && !lookup_attribute ("flatten",
381 DECL_ATTRIBUTES (caller->decl))
382 && !caller_growth_limits (e))
383 inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
387 else if (caller_tree != callee_tree)
389 bool always_inline =
390 (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
391 && lookup_attribute ("always_inline",
392 DECL_ATTRIBUTES (callee->decl)));
393 inline_summary *caller_info = inline_summaries->get (caller);
394 inline_summary *callee_info = inline_summaries->get (callee);
      /* Until GCC 4.9 we did not check the semantics-altering flags
         below and inlined across optimization boundaries.
         Enabling the checks below breaks several packages by refusing
         to inline library always_inline functions.  See PR65873.
         Disable the check for early inlining for now until a better
         solution is found.  */
      if (always_inline && early)
        ;
      /* There are some options that change IL semantics which means
         we cannot inline in these cases for correctness reasons.
         Not even for always_inline declared functions.  */
407 else if (check_match (flag_wrapv)
408 || check_match (flag_trapv)
               /* When the caller or callee does FP math, be sure the FP
                  codegen flags are compatible.  */
411 || ((caller_info->fp_expressions && callee_info->fp_expressions)
412 && (check_maybe_up (flag_rounding_math)
413 || check_maybe_up (flag_trapping_math)
414 || check_maybe_down (flag_unsafe_math_optimizations)
415 || check_maybe_down (flag_finite_math_only)
416 || check_maybe_up (flag_signaling_nans)
417 || check_maybe_down (flag_cx_limited_range)
418 || check_maybe_up (flag_signed_zeros)
419 || check_maybe_down (flag_associative_math)
420 || check_maybe_down (flag_reciprocal_math)
421 || check_maybe_down (flag_fp_int_builtin_inexact)
422 /* Strictly speaking only when the callee contains function
423 calls that may end up setting errno. */
424 || check_maybe_up (flag_errno_math)))
               /* We do not want code compiled with exceptions to be
                  brought into a non-EH function unless we know that the
                  callee does not throw.
                  This is tracked by DECL_FUNCTION_PERSONALITY.  */
429 || (check_maybe_up (flag_non_call_exceptions)
430 && DECL_FUNCTION_PERSONALITY (callee->decl))
431 || (check_maybe_up (flag_exceptions)
432 && DECL_FUNCTION_PERSONALITY (callee->decl))
               /* When devirtualization is disabled for the callee, it is
                  not safe to inline it as we possibly mangled the type info.
                  Allow early inlining of always inlines.  */
436 || (!early && check_maybe_down (flag_devirtualize)))
438 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
439 inlinable = false;
441 /* gcc.dg/pr43564.c. Apply user-forced inline even at -O0. */
442 else if (always_inline)
444 /* When user added an attribute to the callee honor it. */
445 else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
446 && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
448 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
449 inlinable = false;
  /* If explicit optimize attributes are not used, the mismatch is caused
     by different command line options used to build different units.
     Do not care about COMDAT functions - those are intended to be
     optimized with the optimization flags of the module they are used in.
     Also do not care about mixing up size/speed optimization when
     DECL_DISREGARD_INLINE_LIMITS is set.  */
457 else if ((callee->merged_comdat
458 && !lookup_attribute ("optimize",
459 DECL_ATTRIBUTES (caller->decl)))
460 || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
  /* If the mismatch is caused by merging two LTO units with different
     optimization flags we want to be a bit nicer.  However never inline
     if one of the functions is not optimized at all.  */
465 else if (!opt_for_fn (callee->decl, optimize)
466 || !opt_for_fn (caller->decl, optimize))
468 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
469 inlinable = false;
  /* If the callee is optimized for size and the caller is not, allow inlining
     if the code shrinks or we are within the MAX_INLINE_INSNS_SINGLE limit
     and the callee is declared inline (and thus likely a unified comdat).
     This will allow the caller to run faster.  */
475 else if (opt_for_fn (callee->decl, optimize_size)
476 > opt_for_fn (caller->decl, optimize_size))
478 int growth = estimate_edge_growth (e);
479 if (growth > 0
480 && (!DECL_DECLARED_INLINE_P (callee->decl)
481 && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
482 MAX_INLINE_INSNS_AUTO)))
484 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
485 inlinable = false;
  /* If the callee is more aggressively optimized for performance than the
     caller, we generally want to inline only cheap (runtime-wise) functions.  */
490 else if (opt_for_fn (callee->decl, optimize_size)
491 < opt_for_fn (caller->decl, optimize_size)
492 || (opt_for_fn (callee->decl, optimize)
493 > opt_for_fn (caller->decl, optimize)))
495 if (estimate_edge_time (e)
496 >= 20 + inline_edge_summary (e)->call_stmt_time)
498 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
499 inlinable = false;
505 if (!inlinable && report)
506 report_inline_failed_reason (e);
507 return inlinable;
511 /* Return true if the edge E is inlinable during early inlining. */
513 static bool
514 can_early_inline_edge_p (struct cgraph_edge *e)
516 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  /* The early inliner might get called at WPA stage when an IPA pass adds a
     new function.  In this case we cannot really do any early inlining
     because function bodies are missing.  */
520 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
521 return false;
522 if (!gimple_has_body_p (callee->decl))
524 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
525 return false;
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee in the early inliner yet).  We don't have a CIF code for this
     case; later we will re-do the decision in the real inliner.  */
531 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
532 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
534 if (dump_file)
535 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
536 return false;
538 if (!can_inline_edge_p (e, true, false, true))
539 return false;
540 return true;
544 /* Return number of calls in N. Ignore cheap builtins. */
546 static int
547 num_calls (struct cgraph_node *n)
549 struct cgraph_edge *e;
550 int num = 0;
552 for (e = n->callees; e; e = e->next_callee)
553 if (!is_inexpensive_builtin (e->callee->decl))
554 num++;
555 return num;
/* Return true if we want to early-inline the small function along edge E.  */
561 static bool
562 want_early_inline_function_p (struct cgraph_edge *e)
564 bool want_inline = true;
565 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
567 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    /* For AutoFDO, we need to make sure that, before the profile summary,
       all hot paths' IR looks exactly the same as in the profiled binary.
       As a result, in the early inliner we disregard the size limit and
       inline those callsites that are:
         * inlined in the profiled binary, and
         * the cloned callee has enough samples to be considered "hot".  */
575 else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
577 else if (!DECL_DECLARED_INLINE_P (callee->decl)
578 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
580 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
581 report_inline_failed_reason (e);
582 want_inline = false;
584 else
586 int growth = estimate_edge_growth (e);
587 int n;
589 if (growth <= 0)
591 else if (!e->maybe_hot_p ()
592 && growth > 0)
594 if (dump_file)
595 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
596 "call is cold and code would grow by %i\n",
597 xstrdup_for_dump (e->caller->name ()),
598 e->caller->order,
599 xstrdup_for_dump (callee->name ()), callee->order,
600 growth);
601 want_inline = false;
603 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
605 if (dump_file)
606 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
607 "growth %i exceeds --param early-inlining-insns\n",
608 xstrdup_for_dump (e->caller->name ()),
609 e->caller->order,
610 xstrdup_for_dump (callee->name ()), callee->order,
611 growth);
612 want_inline = false;
614 else if ((n = num_calls (callee)) != 0
615 && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
617 if (dump_file)
618 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
619 "growth %i exceeds --param early-inlining-insns "
620 "divided by number of calls\n",
621 xstrdup_for_dump (e->caller->name ()),
622 e->caller->order,
623 xstrdup_for_dump (callee->name ()), callee->order,
624 growth);
625 want_inline = false;
628 return want_inline;
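
/* Worked illustration of the scaling above (hypothetical parameter value): if
   --param early-inlining-insns is 14, a call whose inlining grows the caller
   by 6 insns is accepted when the callee is a leaf, but rejected when the
   callee itself contains 2 non-trivial calls, because 6 * (2 + 1) = 18 > 14.  */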
631 /* Compute time of the edge->caller + edge->callee execution when inlining
632 does not happen. */
634 inline sreal
635 compute_uninlined_call_time (struct cgraph_edge *edge,
636 sreal uninlined_call_time)
638 cgraph_node *caller = (edge->caller->global.inlined_to
639 ? edge->caller->global.inlined_to
640 : edge->caller);
642 if (edge->count && caller->count)
643 uninlined_call_time *= (sreal)edge->count / caller->count;
644 if (edge->frequency)
645 uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
646 else
647 uninlined_call_time = uninlined_call_time >> 11;
649 sreal caller_time = inline_summaries->get (caller)->time;
650 return uninlined_call_time + caller_time;
/* Same as compute_uninlined_call_time but compute the time when inlining
   does happen.  */
656 inline sreal
657 compute_inlined_call_time (struct cgraph_edge *edge,
658 sreal time)
660 cgraph_node *caller = (edge->caller->global.inlined_to
661 ? edge->caller->global.inlined_to
662 : edge->caller);
663 sreal caller_time = inline_summaries->get (caller)->time;
665 if (edge->count && caller->count)
666 time *= (sreal)edge->count / caller->count;
667 if (edge->frequency)
668 time *= cgraph_freq_base_rec * edge->frequency;
669 else
670 time = time >> 11;
672 /* This calculation should match one in ipa-inline-analysis.c
673 (estimate_edge_size_and_time). */
674 time -= (sreal) edge->frequency
675 * inline_edge_summary (edge)->call_stmt_time / CGRAPH_FREQ_BASE;
676 time += caller_time;
677 if (time <= 0)
678 time = ((sreal) 1) >> 8;
679 gcc_checking_assert (time >= 0);
680 return time;
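
/* Illustration of the two estimates above (hypothetical numbers): with
   caller time 100, a call executed at half of CGRAPH_FREQ_BASE, callee time
   30 when not specialized and 24 when specialized to this call, and a call
   statement costing 2, the uninlined estimate is 100 + 0.5 * 30 = 115 while
   the inlined estimate is 100 + 0.5 * 24 - 0.5 * 2 = 111.  */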
/* Return true if the speedup for inlining E is bigger than
   PARAM_INLINE_MIN_SPEEDUP.  */
686 static bool
687 big_speedup_p (struct cgraph_edge *e)
689 sreal unspec_time;
690 sreal spec_time = estimate_edge_time (e, &unspec_time);
691 sreal time = compute_uninlined_call_time (e, unspec_time);
692 sreal inlined_time = compute_inlined_call_time (e, spec_time);
694 if (time - inlined_time
695 > (sreal) (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP))
696 * percent_rec)
697 return true;
698 return false;
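
/* Continuing the illustration above (hypothetical --param inline-min-speedup
   of 10): saving 115 - 111 = 4 time units is not more than 10% of 115, so
   this particular edge would not count as a big speedup.  */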
/* Return true if we are interested in inlining the small function along
   edge E.
   When REPORT is true, report the reason to the dump file.  */
704 static bool
705 want_inline_small_function_p (struct cgraph_edge *e, bool report)
707 bool want_inline = true;
708 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
710 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
712 else if (!DECL_DECLARED_INLINE_P (callee->decl)
713 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
715 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
716 want_inline = false;
  /* Do a fast and conservative check whether the function can be a good
     inline candidate.  At the moment we allow inline hints to
     promote non-inline functions to inline and we increase
     MAX_INLINE_INSNS_SINGLE 16-fold for inline functions.  */
722 else if ((!DECL_DECLARED_INLINE_P (callee->decl)
723 && (!e->count || !e->maybe_hot_p ()))
724 && inline_summaries->get (callee)->min_size
725 - inline_edge_summary (e)->call_stmt_size
726 > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
728 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
729 want_inline = false;
731 else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
732 && inline_summaries->get (callee)->min_size
733 - inline_edge_summary (e)->call_stmt_size
734 > 16 * MAX_INLINE_INSNS_SINGLE)
736 e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
737 ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
738 : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
739 want_inline = false;
741 else
743 int growth = estimate_edge_growth (e);
744 inline_hints hints = estimate_edge_hints (e);
745 bool big_speedup = big_speedup_p (e);
747 if (growth <= 0)
      /* Apply the MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
         hints suggest that inlining the given function is very profitable.  */
751 else if (DECL_DECLARED_INLINE_P (callee->decl)
752 && growth >= MAX_INLINE_INSNS_SINGLE
753 && ((!big_speedup
754 && !(hints & (INLINE_HINT_indirect_call
755 | INLINE_HINT_known_hot
756 | INLINE_HINT_loop_iterations
757 | INLINE_HINT_array_index
758 | INLINE_HINT_loop_stride)))
759 || growth >= MAX_INLINE_INSNS_SINGLE * 16))
761 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
762 want_inline = false;
764 else if (!DECL_DECLARED_INLINE_P (callee->decl)
765 && !opt_for_fn (e->caller->decl, flag_inline_functions))
767 /* growth_likely_positive is expensive, always test it last. */
768 if (growth >= MAX_INLINE_INSNS_SINGLE
769 || growth_likely_positive (callee, growth))
771 e->inline_failed = CIF_NOT_DECLARED_INLINED;
772 want_inline = false;
      /* Apply the MAX_INLINE_INSNS_AUTO limit for functions not declared
         inline.  Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest
         that inlining the given function is very profitable.  */
778 else if (!DECL_DECLARED_INLINE_P (callee->decl)
779 && !big_speedup
780 && !(hints & INLINE_HINT_known_hot)
781 && growth >= ((hints & (INLINE_HINT_indirect_call
782 | INLINE_HINT_loop_iterations
783 | INLINE_HINT_array_index
784 | INLINE_HINT_loop_stride))
785 ? MAX (MAX_INLINE_INSNS_AUTO,
786 MAX_INLINE_INSNS_SINGLE)
787 : MAX_INLINE_INSNS_AUTO))
789 /* growth_likely_positive is expensive, always test it last. */
790 if (growth >= MAX_INLINE_INSNS_SINGLE
791 || growth_likely_positive (callee, growth))
793 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
794 want_inline = false;
797 /* If call is cold, do not inline when function body would grow. */
798 else if (!e->maybe_hot_p ()
799 && (growth >= MAX_INLINE_INSNS_SINGLE
800 || growth_likely_positive (callee, growth)))
802 e->inline_failed = CIF_UNLIKELY_CALL;
803 want_inline = false;
806 if (!want_inline && report)
807 report_inline_failed_reason (e);
808 return want_inline;
/* EDGE is a self-recursive edge.
   We handle two cases - when function A is inlined into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just cause new recursive calls to appear.  */
822 static bool
823 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
824 struct cgraph_node *outer_node,
825 bool peeling,
826 int depth)
828 char const *reason = NULL;
829 bool want_inline = true;
830 int caller_freq = CGRAPH_FREQ_BASE;
831 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
833 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
834 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
836 if (!edge->maybe_hot_p ())
838 reason = "recursive call is cold";
839 want_inline = false;
841 else if (max_count && !outer_node->count)
843 reason = "not executed in profile";
844 want_inline = false;
846 else if (depth > max_depth)
848 reason = "--param max-inline-recursive-depth exceeded.";
849 want_inline = false;
852 if (outer_node->global.inlined_to)
853 caller_freq = outer_node->callers->frequency;
855 if (!caller_freq)
857 reason = "function is inlined and unlikely";
858 want_inline = false;
861 if (!want_inline)
  /* Inlining of a self-recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self-recursive function very small.
     Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
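  /* Worked illustration (assuming CGRAPH_FREQ_BASE of 1000 and a hypothetical
     max_depth of 8): max_prob = 1000 - (1000 + 7) / 8 = 875, i.e. the
     recursion probability must stay below 87.5%; at depth 2 the threshold
     tightens to 875 * 875 / 1000 = 765, and so on for deeper copies.  */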
872 else if (peeling)
874 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
875 / max_depth);
876 int i;
877 for (i = 1; i < depth; i++)
878 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
879 if (max_count
880 && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
881 >= max_prob))
883 reason = "profile of recursive call is too large";
884 want_inline = false;
886 if (!max_count
887 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
888 >= max_prob))
890 reason = "frequency of recursive call is too large";
891 want_inline = false;
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the
     recursion depth is large.  We reduce function call overhead and increase
     chances that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame setup,
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self-recursive call within a loop also results in
     large loop depths that generally optimize badly.  We may want to throttle
     down inlining in those cases.  In particular this seems to happen in one
     of the libstdc++ rb tree methods.  */
910 else
912 if (max_count
913 && (edge->count * 100 / outer_node->count
914 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
916 reason = "profile of recursive call is too small";
917 want_inline = false;
919 else if (!max_count
920 && (edge->frequency * 100 / caller_freq
921 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
923 reason = "frequency of recursive call is too small";
924 want_inline = false;
927 if (!want_inline && dump_file)
928 fprintf (dump_file, " not inlining recursively: %s\n", reason);
929 return want_inline;
/* Return true when NODE has an uninlinable caller;
   set HAS_HOT_CALL if it has a hot call.
   Worker for cgraph_for_node_and_aliases.  */
936 static bool
937 check_callers (struct cgraph_node *node, void *has_hot_call)
939 struct cgraph_edge *e;
940 for (e = node->callers; e; e = e->next_caller)
942 if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
943 return true;
944 if (!can_inline_edge_p (e, true))
945 return true;
946 if (e->recursive_p ())
947 return true;
948 if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
949 *(bool *)has_hot_call = true;
951 return false;
954 /* If NODE has a caller, return true. */
956 static bool
957 has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
959 if (node->callers)
960 return true;
961 return false;
964 /* Decide if inlining NODE would reduce unit size by eliminating
965 the offline copy of function.
966 When COLD is true the cold calls are considered, too. */
968 static bool
969 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
971 bool has_hot_call = false;
  /* Aliases get inlined along with the function they alias.  */
974 if (node->alias)
975 return false;
976 /* Already inlined? */
977 if (node->global.inlined_to)
978 return false;
979 /* Does it have callers? */
980 if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
981 return false;
982 /* Inlining into all callers would increase size? */
983 if (estimate_growth (node) > 0)
984 return false;
985 /* All inlines must be possible. */
986 if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
987 true))
988 return false;
989 if (!cold && !has_hot_call)
990 return false;
991 return true;
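
/* Illustration (hypothetical scenario): a static helper with three local
   callers whose estimated growth is non-positive - the three inlined copies
   cost no more than keeping the offline body - passes the checks above and
   will be inlined into all callers, letting the offline copy be removed.  */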
/* A cost model driving the inlining heuristics in a way such that the edges
   with the smallest badness are inlined first.  After each inlining is
   performed the costs of all caller edges of the nodes affected are
   recomputed, so the metrics may accurately depend on values such as the
   number of inlinable callers of the function or the function body size.  */
1000 static sreal
1001 edge_badness (struct cgraph_edge *edge, bool dump)
1003 sreal badness;
1004 int growth;
1005 sreal edge_time, unspec_edge_time;
1006 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1007 struct inline_summary *callee_info = inline_summaries->get (callee);
1008 inline_hints hints;
1009 cgraph_node *caller = (edge->caller->global.inlined_to
1010 ? edge->caller->global.inlined_to
1011 : edge->caller);
1013 growth = estimate_edge_growth (edge);
1014 edge_time = estimate_edge_time (edge, &unspec_edge_time);
1015 hints = estimate_edge_hints (edge);
1016 gcc_checking_assert (edge_time >= 0);
1017 /* Check that inlined time is better, but tolerate some roundoff issues. */
1018 gcc_checking_assert ((edge_time - callee_info->time).to_int () <= 0);
1019 gcc_checking_assert (growth <= callee_info->size);
1021 if (dump)
1023 fprintf (dump_file, " Badness calculation for %s/%i -> %s/%i\n",
1024 xstrdup_for_dump (edge->caller->name ()),
1025 edge->caller->order,
1026 xstrdup_for_dump (callee->name ()),
1027 edge->callee->order);
1028 fprintf (dump_file, " size growth %i, time %f unspec %f ",
1029 growth,
1030 edge_time.to_double (),
1031 unspec_edge_time.to_double ());
1032 dump_inline_hints (dump_file, hints);
1033 if (big_speedup_p (edge))
1034 fprintf (dump_file, " big_speedup");
1035 fprintf (dump_file, "\n");
1038 /* Always prefer inlining saving code size. */
1039 if (growth <= 0)
1041 badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
1042 if (dump)
1043 fprintf (dump_file, " %f: Growth %d <= 0\n", badness.to_double (),
1044 growth);
1046 /* Inlining into EXTERNAL functions is not going to change anything unless
1047 they are themselves inlined. */
1048 else if (DECL_EXTERNAL (caller->decl))
1050 if (dump)
1051 fprintf (dump_file, " max: function is external\n");
1052 return sreal::max ();
  /* When profile is available, compute badness as:

                 time_saved * caller_count
     goodness = -------------------------------------------------
                 growth_of_caller * overall_growth * combined_size

     badness = - goodness

     Again use a negative value to make calls with profile appear hotter
     than calls without.  */
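  /* Numeric illustration of the formula above (hypothetical values, no FDO
     count scaling): with an estimated time saving of 4, edge growth of 10,
     callee overall growth of 20 (squared below to 400 because it is under
     256) and a caller self size of 90,
     badness = -4 / (10 * 400 * (90 + 10)) = -1e-5; a smaller (more negative)
     key is extracted from the heap first.  */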
1065 else if (opt_for_fn (caller->decl, flag_guess_branch_prob) || caller->count)
1067 sreal numerator, denominator;
1068 int overall_growth;
1070 numerator = (compute_uninlined_call_time (edge, unspec_edge_time)
1071 - compute_inlined_call_time (edge, edge_time));
1072 if (numerator == 0)
1073 numerator = ((sreal) 1 >> 8);
1074 if (caller->count)
1075 numerator *= caller->count;
1076 else if (opt_for_fn (caller->decl, flag_branch_probabilities))
1077 numerator = numerator >> 11;
1078 denominator = growth;
1080 overall_growth = callee_info->growth;
      /* Look for inliner wrappers of the form:

           inline_caller ()
             {
               do_fast_job...
               if (need_more_work)
                 noninline_callee ();
             }

         Without penalizing this case, we usually inline noninline_callee
         into the inline_caller because overall_growth is small, preventing
         further inlining of inline_caller.

         Penalize only callgraph edges to functions with small overall
         growth ...  */
1097 if (growth > overall_growth
1098 /* ... and having only one caller which is not inlined ... */
1099 && callee_info->single_caller
1100 && !edge->caller->global.inlined_to
1101 /* ... and edges executed only conditionally ... */
1102 && edge->frequency < CGRAPH_FREQ_BASE
1103 /* ... consider case where callee is not inline but caller is ... */
1104 && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
1105 && DECL_DECLARED_INLINE_P (caller->decl))
1106 /* ... or when early optimizers decided to split and edge
1107 frequency still indicates splitting is a win ... */
1108 || (callee->split_part && !caller->split_part
1109 && edge->frequency
1110 < CGRAPH_FREQ_BASE
1111 * PARAM_VALUE
1112 (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100
1113 /* ... and do not overwrite user specified hints. */
1114 && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
1115 || DECL_DECLARED_INLINE_P (caller->decl)))))
1117 struct inline_summary *caller_info = inline_summaries->get (caller);
1118 int caller_growth = caller_info->growth;
          /* Only apply the penalty when the caller looks like an inline
             candidate and it is not called just once.  */
1122 if (!caller_info->single_caller && overall_growth < caller_growth
1123 && caller_info->inlinable
1124 && caller_info->size
1125 < (DECL_DECLARED_INLINE_P (caller->decl)
1126 ? MAX_INLINE_INSNS_SINGLE : MAX_INLINE_INSNS_AUTO))
1128 if (dump)
1129 fprintf (dump_file,
1130 " Wrapper penalty. Increasing growth %i to %i\n",
1131 overall_growth, caller_growth);
1132 overall_growth = caller_growth;
1135 if (overall_growth > 0)
          /* Strongly prefer functions with few callers that can be inlined
             fully.  Squaring the growth here leads to smaller binaries on
             average.  Watch however for extreme cases and return to a linear
             function when growth is large.  */
1141 if (overall_growth < 256)
1142 overall_growth *= overall_growth;
1143 else
1144 overall_growth += 256 * 256 - 256;
1145 denominator *= overall_growth;
1147 denominator *= inline_summaries->get (caller)->self_size + growth;
1149 badness = - numerator / denominator;
1151 if (dump)
1153 fprintf (dump_file,
1154 " %f: guessed profile. frequency %f, count %" PRId64
1155 " caller count %" PRId64
1156 " time w/o inlining %f, time with inlining %f"
1157 " overall growth %i (current) %i (original)"
1158 " %i (compensated)\n",
1159 badness.to_double (),
1160 (double)edge->frequency / CGRAPH_FREQ_BASE,
1161 edge->count, caller->count,
1162 compute_uninlined_call_time (edge,
1163 unspec_edge_time).to_double (),
1164 compute_inlined_call_time (edge, edge_time).to_double (),
1165 estimate_growth (callee),
1166 callee_info->growth, overall_growth);
  /* When the function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall number
     of functions fully inlined in the program.  */
1173 else
1175 int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
1176 badness = growth;
1178 /* Decrease badness if call is nested. */
1179 if (badness > 0)
1180 badness = badness >> nest;
1181 else
1182 badness = badness << nest;
1183 if (dump)
1184 fprintf (dump_file, " %f: no profile. nest %i\n",
1185 badness.to_double (), nest);
1187 gcc_checking_assert (badness != 0);
1189 if (edge->recursive_p ())
1190 badness = badness.shift (badness > 0 ? 4 : -4);
1191 if ((hints & (INLINE_HINT_indirect_call
1192 | INLINE_HINT_loop_iterations
1193 | INLINE_HINT_array_index
1194 | INLINE_HINT_loop_stride))
1195 || callee_info->growth <= 0)
1196 badness = badness.shift (badness > 0 ? -2 : 2);
1197 if (hints & (INLINE_HINT_same_scc))
1198 badness = badness.shift (badness > 0 ? 3 : -3);
1199 else if (hints & (INLINE_HINT_in_scc))
1200 badness = badness.shift (badness > 0 ? 2 : -2);
1201 else if (hints & (INLINE_HINT_cross_module))
1202 badness = badness.shift (badness > 0 ? 1 : -1);
1203 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1204 badness = badness.shift (badness > 0 ? -4 : 4);
1205 else if ((hints & INLINE_HINT_declared_inline))
1206 badness = badness.shift (badness > 0 ? -3 : 3);
1207 if (dump)
1208 fprintf (dump_file, " Adjusted by hints %f\n", badness.to_double ());
1209 return badness;
1212 /* Recompute badness of EDGE and update its key in HEAP if needed. */
1213 static inline void
1214 update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
1216 sreal badness = edge_badness (edge, false);
1217 if (edge->aux)
1219 edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
1220 gcc_checking_assert (n->get_data () == edge);
      /* fibonacci_heap::replace_key does busy updating of the
         heap that is unnecessarily expensive.
         We do lazy increases: after extracting the minimum, if the key
         turns out to be out of date, it is re-inserted into the heap
         with the correct value.  */
1227 if (badness < n->get_key ())
1229 if (dump_file && (dump_flags & TDF_DETAILS))
1231 fprintf (dump_file,
1232 " decreasing badness %s/%i -> %s/%i, %f"
1233 " to %f\n",
1234 xstrdup_for_dump (edge->caller->name ()),
1235 edge->caller->order,
1236 xstrdup_for_dump (edge->callee->name ()),
1237 edge->callee->order,
1238 n->get_key ().to_double (),
1239 badness.to_double ());
1241 heap->decrease_key (n, badness);
1244 else
1246 if (dump_file && (dump_flags & TDF_DETAILS))
1248 fprintf (dump_file,
1249 " enqueuing call %s/%i -> %s/%i, badness %f\n",
1250 xstrdup_for_dump (edge->caller->name ()),
1251 edge->caller->order,
1252 xstrdup_for_dump (edge->callee->name ()),
1253 edge->callee->order,
1254 badness.to_double ());
1256 edge->aux = heap->insert (badness, edge);
/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need to be reset
   because a better context may be known.  */
1266 static void
1267 reset_edge_caches (struct cgraph_node *node)
1269 struct cgraph_edge *edge;
1270 struct cgraph_edge *e = node->callees;
1271 struct cgraph_node *where = node;
1272 struct ipa_ref *ref;
1274 if (where->global.inlined_to)
1275 where = where->global.inlined_to;
1277 for (edge = where->callers; edge; edge = edge->next_caller)
1278 if (edge->inline_failed)
1279 reset_edge_growth_cache (edge);
1281 FOR_EACH_ALIAS (where, ref)
1282 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
1284 if (!e)
1285 return;
1287 while (true)
1288 if (!e->inline_failed && e->callee->callees)
1289 e = e->callee->callees;
1290 else
1292 if (e->inline_failed)
1293 reset_edge_growth_cache (e);
1294 if (e->next_callee)
1295 e = e->next_callee;
1296 else
1300 if (e->caller == node)
1301 return;
1302 e = e->caller->callers;
1304 while (!e->next_callee);
1305 e = e->next_callee;
/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
   it is inlinable.  Otherwise check all edges.  */
1315 static void
1316 update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
1317 bitmap updated_nodes,
1318 struct cgraph_edge *check_inlinablity_for)
1320 struct cgraph_edge *edge;
1321 struct ipa_ref *ref;
1323 if ((!node->alias && !inline_summaries->get (node)->inlinable)
1324 || node->global.inlined_to)
1325 return;
1326 if (!bitmap_set_bit (updated_nodes, node->uid))
1327 return;
1329 FOR_EACH_ALIAS (node, ref)
1331 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1332 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1335 for (edge = node->callers; edge; edge = edge->next_caller)
1336 if (edge->inline_failed)
1338 if (!check_inlinablity_for
1339 || check_inlinablity_for == edge)
1341 if (can_inline_edge_p (edge, false)
1342 && want_inline_small_function_p (edge, false))
1343 update_edge_key (heap, edge);
1344 else if (edge->aux)
1346 report_inline_failed_reason (edge);
1347 heap->delete_node ((edge_heap_node_t *) edge->aux);
1348 edge->aux = NULL;
1351 else if (edge->aux)
1352 update_edge_key (heap, edge);
/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are only going to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */
1361 static void
1362 update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
1363 bitmap updated_nodes)
1365 struct cgraph_edge *e = node->callees;
1367 if (!e)
1368 return;
1369 while (true)
1370 if (!e->inline_failed && e->callee->callees)
1371 e = e->callee->callees;
1372 else
1374 enum availability avail;
1375 struct cgraph_node *callee;
        /* We do not reset the callee growth cache here.  Since we added a new
           call, growth could have just increased and consequently the badness
           metric doesn't need updating.  */
1379 if (e->inline_failed
1380 && (callee = e->callee->ultimate_alias_target (&avail, e->caller))
1381 && inline_summaries->get (callee)->inlinable
1382 && avail >= AVAIL_AVAILABLE
1383 && !bitmap_bit_p (updated_nodes, callee->uid))
1385 if (can_inline_edge_p (e, false)
1386 && want_inline_small_function_p (e, false))
1387 update_edge_key (heap, e);
1388 else if (e->aux)
1390 report_inline_failed_reason (e);
1391 heap->delete_node ((edge_heap_node_t *) e->aux);
1392 e->aux = NULL;
1395 if (e->next_callee)
1396 e = e->next_callee;
1397 else
1401 if (e->caller == node)
1402 return;
1403 e = e->caller->callers;
1405 while (!e->next_callee);
1406 e = e->next_callee;
1411 /* Enqueue all recursive calls from NODE into priority queue depending on
1412 how likely we want to recursively inline the call. */
1414 static void
1415 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1416 edge_heap_t *heap)
1418 struct cgraph_edge *e;
1419 enum availability avail;
1421 for (e = where->callees; e; e = e->next_callee)
1422 if (e->callee == node
1423 || (e->callee->ultimate_alias_target (&avail, e->caller) == node
1424 && avail > AVAIL_INTERPOSABLE))
1426 /* When profile feedback is available, prioritize by expected number
1427 of calls. */
1428 heap->insert (!max_count ? -e->frequency
1429 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
1432 for (e = where->callees; e; e = e->next_callee)
1433 if (!e->inline_failed)
1434 lookup_recursive_calls (node, e->callee, heap);
/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given limit.  If any new indirect
   edges are discovered in the process, add them to *NEW_EDGES, unless
   NEW_EDGES is NULL.  */
1442 static bool
1443 recursive_inlining (struct cgraph_edge *edge,
1444 vec<cgraph_edge *> *new_edges)
1446 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1447 edge_heap_t heap (sreal::min ());
1448 struct cgraph_node *node;
1449 struct cgraph_edge *e;
1450 struct cgraph_node *master_clone = NULL, *next;
1451 int depth = 0;
1452 int n = 0;
1454 node = edge->caller;
1455 if (node->global.inlined_to)
1456 node = node->global.inlined_to;
1458 if (DECL_DECLARED_INLINE_P (node->decl))
1459 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1461 /* Make sure that function is small enough to be considered for inlining. */
1462 if (estimate_size_after_inlining (node, edge) >= limit)
1463 return false;
1464 lookup_recursive_calls (node, node, &heap);
1465 if (heap.empty ())
1466 return false;
1468 if (dump_file)
1469 fprintf (dump_file,
1470 " Performing recursive inlining on %s\n",
1471 node->name ());
  /* Do the inlining and update the list of recursive calls during the
     process.  */
1474 while (!heap.empty ())
1476 struct cgraph_edge *curr = heap.extract_min ();
1477 struct cgraph_node *cnode, *dest = curr->callee;
1479 if (!can_inline_edge_p (curr, true))
1480 continue;
      /* MASTER_CLONE is produced when we have already started modifying
         the function.  Be sure to redirect the edge to the original body
         before estimating growths; otherwise we will be seeing growths
         after inlining the already modified body.  */
1486 if (master_clone)
1488 curr->redirect_callee (master_clone);
1489 reset_edge_growth_cache (curr);
1492 if (estimate_size_after_inlining (node, curr) > limit)
1494 curr->redirect_callee (dest);
1495 reset_edge_growth_cache (curr);
1496 break;
1499 depth = 1;
1500 for (cnode = curr->caller;
1501 cnode->global.inlined_to; cnode = cnode->callers->caller)
1502 if (node->decl
1503 == curr->callee->ultimate_alias_target ()->decl)
1504 depth++;
1506 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1508 curr->redirect_callee (dest);
1509 reset_edge_growth_cache (curr);
1510 continue;
1513 if (dump_file)
1515 fprintf (dump_file,
1516 " Inlining call of depth %i", depth);
1517 if (node->count)
1519 fprintf (dump_file, " called approx. %.2f times per call",
1520 (double)curr->count / node->count);
1522 fprintf (dump_file, "\n");
1524 if (!master_clone)
1526 /* We need original clone to copy around. */
1527 master_clone = node->create_clone (node->decl, node->count,
1528 CGRAPH_FREQ_BASE, false, vNULL,
1529 true, NULL, NULL);
1530 for (e = master_clone->callees; e; e = e->next_callee)
1531 if (!e->inline_failed)
1532 clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
1533 curr->redirect_callee (master_clone);
1534 reset_edge_growth_cache (curr);
1537 inline_call (curr, false, new_edges, &overall_size, true);
1538 lookup_recursive_calls (node, curr->callee, &heap);
1539 n++;
1542 if (!heap.empty () && dump_file)
1543 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1545 if (!master_clone)
1546 return false;
1548 if (dump_file)
1549 fprintf (dump_file,
1550 "\n Inlined %i times, "
1551 "body grown from size %i to %i, time %f to %f\n", n,
1552 inline_summaries->get (master_clone)->size,
1553 inline_summaries->get (node)->size,
1554 inline_summaries->get (master_clone)->time.to_double (),
1555 inline_summaries->get (node)->time.to_double ());
  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
1560 for (node = symtab->first_function (); node != master_clone;
1561 node = next)
1563 next = symtab->next_function (node);
1564 if (node->global.inlined_to == master_clone)
1565 node->remove ();
1567 master_clone->remove ();
1568 return true;
/* Given the whole-compilation-unit estimate INSNS, compute how large we can
   allow the unit to grow.  */
1575 static int
1576 compute_max_insns (int insns)
1578 int max_insns = insns;
1579 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1580 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1582 return ((int64_t) max_insns
1583 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
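
/* Worked illustration (hypothetical parameter value): for a unit of 50000
   insns already above --param large-unit-insns, with --param
   inline-unit-growth of 20 the small-function inliner may grow the unit to
   at most 50000 * (100 + 20) / 100 = 60000 insns.  */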
1587 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1589 static void
1590 add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
1592 while (new_edges.length () > 0)
1594 struct cgraph_edge *edge = new_edges.pop ();
1596 gcc_assert (!edge->aux);
1597 if (edge->inline_failed
1598 && can_inline_edge_p (edge, true)
1599 && want_inline_small_function_p (edge, true))
1600 edge->aux = heap->insert (edge_badness (edge, false), edge);
1604 /* Remove EDGE from the fibheap. */
1606 static void
1607 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1609 if (e->aux)
1611 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
1612 e->aux = NULL;
1616 /* Return true if speculation of edge E seems useful.
1617 If ANTICIPATE_INLINING is true, be conservative and hope that E
1618 may get inlined. */
1620 bool
1621 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1623 enum availability avail;
1624 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
1625 e->caller);
1626 struct cgraph_edge *direct, *indirect;
1627 struct ipa_ref *ref;
1629 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1631 if (!e->maybe_hot_p ())
1632 return false;
1634 /* See if IP optimizations found something potentially useful about the
1635 function. For now we look only for CONST/PURE flags. Almost everything
1636 else we propagate is useless. */
1637 if (avail >= AVAIL_AVAILABLE)
1639 int ecf_flags = flags_from_decl_or_type (target->decl);
1640 if (ecf_flags & ECF_CONST)
1642 e->speculative_call_info (direct, indirect, ref);
1643 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1644 return true;
1646 else if (ecf_flags & ECF_PURE)
1648 e->speculative_call_info (direct, indirect, ref);
1649 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1650 return true;
  /* If we did not manage to inline the function nor redirect
     to an ipa-cp clone (those are recognized by the local flag being set),
     it is probably pointless to inline it unless the hardware is missing an
     indirect call predictor.  */
1657 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1658 return false;
1659 /* For overwritable targets there is not much to do. */
1660 if (e->inline_failed && !can_inline_edge_p (e, false, true))
1661 return false;
1662 /* OK, speculation seems interesting. */
1663 return true;
1666 /* We know that EDGE is not going to be inlined.
1667 See if we can remove speculation. */
1669 static void
1670 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1672 if (edge->speculative && !speculation_useful_p (edge, false))
1674 struct cgraph_node *node = edge->caller;
1675 struct cgraph_node *where = node->global.inlined_to
1676 ? node->global.inlined_to : node;
1677 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1679 spec_rem += edge->count;
1680 edge->resolve_speculation ();
1681 reset_edge_caches (where);
1682 inline_update_overall_summary (where);
1683 update_caller_keys (edge_heap, where,
1684 updated_nodes, NULL);
1685 update_callee_keys (edge_heap, where,
1686 updated_nodes);
1687 BITMAP_FREE (updated_nodes);
/* Return true if NODE should be accounted for in the overall size estimate.
   Skip all nodes optimized for size so we can measure the growth of the hot
   part of the program regardless of the cold padding.  */
1695 bool
1696 inline_account_function_p (struct cgraph_node *node)
1698 return (!DECL_EXTERNAL (node->decl)
1699 && !opt_for_fn (node->decl, optimize_size)
1700 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
/* Count the number of callers of NODE and store it into DATA (which
   points to an int).  Worker for cgraph_for_node_and_aliases.  */
1706 static bool
1707 sum_callers (struct cgraph_node *node, void *data)
1709 struct cgraph_edge *e;
1710 int *num_calls = (int *)data;
1712 for (e = node->callers; e; e = e->next_caller)
1713 (*num_calls)++;
1714 return false;
/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap ordered by
   increasing badness.

   The inlining of small functions is bounded by unit growth parameters.  */
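
/* Simplified sketch of the driver below (illustration only; the real loop
   also handles speculation, recursive inlining and lazy heap key updates):

     seed the heap with every call edge that passes can_inline_edge_p and
       want_inline_small_function_p, keyed by edge_badness;
     while the heap is not empty and overall_size stays within
       compute_max_insns (min_size):
         extract the edge with the smallest badness, re-validate it,
         inline it and update the keys of the affected edges.  */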
1723 static void
1724 inline_small_functions (void)
1726 struct cgraph_node *node;
1727 struct cgraph_edge *edge;
1728 edge_heap_t edge_heap (sreal::min ());
1729 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1730 int min_size, max_size;
1731 auto_vec<cgraph_edge *> new_indirect_edges;
1732 int initial_size = 0;
1733 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1734 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1735 new_indirect_edges.create (8);
1737 edge_removal_hook_holder
1738 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1740 /* Compute overall unit size and other global parameters used by badness
1741 metrics. */
1743 max_count = 0;
1744 ipa_reduced_postorder (order, true, true, NULL);
1745 free (order);
1747 FOR_EACH_DEFINED_FUNCTION (node)
1748 if (!node->global.inlined_to)
1750 if (!node->alias && node->analyzed
1751 && (node->has_gimple_body_p () || node->thunk.thunk_p))
1753 struct inline_summary *info = inline_summaries->get (node);
1754 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1756 /* Do not account external functions; they will be optimized out
1757 if not inlined. Also only count the non-cold portion of the program. */
1758 if (inline_account_function_p (node))
1759 initial_size += info->size;
1760 info->growth = estimate_growth (node);
1762 int num_calls = 0;
1763 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
1764 true);
1765 if (num_calls == 1)
1766 info->single_caller = true;
1767 if (dfs && dfs->next_cycle)
1769 struct cgraph_node *n2;
1770 int id = dfs->scc_no + 1;
1771 for (n2 = node; n2;
1772 n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle)
1774 struct inline_summary *info2 = inline_summaries->get (n2);
1775 if (info2->scc_no)
1776 break;
1777 info2->scc_no = id;
1782 for (edge = node->callers; edge; edge = edge->next_caller)
1783 if (max_count < edge->count)
1784 max_count = edge->count;
1786 ipa_free_postorder_info ();
1787 initialize_growth_caches ();
1789 if (dump_file)
1790 fprintf (dump_file,
1791 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1792 initial_size);
1794 overall_size = initial_size;
1795 max_size = compute_max_insns (overall_size);
1796 min_size = overall_size;
1798 /* Populate the heap with all edges we might inline. */
1800 FOR_EACH_DEFINED_FUNCTION (node)
1802 bool update = false;
1803 struct cgraph_edge *next = NULL;
1804 bool has_speculative = false;
1806 if (dump_file)
1807 fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
1808 node->name (), node->order);
1810 for (edge = node->callees; edge; edge = next)
1812 next = edge->next_callee;
1813 if (edge->inline_failed
1814 && !edge->aux
1815 && can_inline_edge_p (edge, true)
1816 && want_inline_small_function_p (edge, true)
1817 && edge->inline_failed)
1819 gcc_assert (!edge->aux);
1820 update_edge_key (&edge_heap, edge);
1822 if (edge->speculative)
1823 has_speculative = true;
1825 if (has_speculative)
1826 for (edge = node->callees; edge; edge = next)
1827 if (edge->speculative && !speculation_useful_p (edge,
1828 edge->aux != NULL))
1830 edge->resolve_speculation ();
1831 update = true;
1833 if (update)
1835 struct cgraph_node *where = node->global.inlined_to
1836 ? node->global.inlined_to : node;
1837 inline_update_overall_summary (where);
1838 reset_edge_caches (where);
1839 update_caller_keys (&edge_heap, where,
1840 updated_nodes, NULL);
1841 update_callee_keys (&edge_heap, where,
1842 updated_nodes);
1843 bitmap_clear (updated_nodes);
1847 gcc_assert (in_lto_p
1848 || !max_count
1849 || (profile_info && flag_branch_probabilities));
1851 while (!edge_heap.empty ())
1853 int old_size = overall_size;
1854 struct cgraph_node *where, *callee;
1855 sreal badness = edge_heap.min_key ();
1856 sreal current_badness;
1857 int growth;
1859 edge = edge_heap.extract_min ();
1860 gcc_assert (edge->aux);
1861 edge->aux = NULL;
1862 if (!edge->inline_failed || !edge->callee->analyzed)
1863 continue;
1865 #if CHECKING_P
1866 /* Be sure that the caches are maintained consistently. */
1867 sreal cached_badness = edge_badness (edge, false);
1869 int old_size_est = estimate_edge_size (edge);
1870 sreal old_time_est = estimate_edge_time (edge);
1871 int old_hints_est = estimate_edge_hints (edge);
1873 reset_edge_growth_cache (edge);
1874 gcc_assert (old_size_est == estimate_edge_size (edge));
1875 gcc_assert (old_time_est == estimate_edge_time (edge));
1876 /* FIXME:
1878 gcc_assert (old_hints_est == estimate_edge_hints (edge));
1880 fails with profile feedback because some hints depend on the
1881 maybe_hot_edge_p predicate and, because the callee gets inlined into
1882 other calls, the edge may become cold.
1883 This ought to be fixed by computing relative probabilities
1884 for the given invocation, but that will be better done once the whole
1885 code is converted to sreals. Disable for now and revert to the "wrong"
1886 value so the enabled/disabled checking paths agree. */
1887 edge_growth_cache[edge->uid].hints = old_hints_est + 1;
1889 /* When updating the edge costs, we only decrease badness in the keys.
1890 Increases of badness are handled lazily; when we see a key with an
1891 out-of-date value, we re-insert it now. */
1892 current_badness = edge_badness (edge, false);
1893 /* Disable checking for profile because roundoff errors may cause slight
1894 deviations in the order. */
1895 gcc_assert (max_count || cached_badness == current_badness);
1896 gcc_assert (current_badness >= badness);
1897 #else
1898 current_badness = edge_badness (edge, false);
1899 #endif
1900 if (current_badness != badness)
1902 if (edge_heap.min () && current_badness > edge_heap.min_key ())
1904 edge->aux = edge_heap.insert (current_badness, edge);
1905 continue;
1907 else
1908 badness = current_badness;
1911 if (!can_inline_edge_p (edge, true))
1913 resolve_noninline_speculation (&edge_heap, edge);
1914 continue;
1917 callee = edge->callee->ultimate_alias_target ();
1918 growth = estimate_edge_growth (edge);
1919 if (dump_file)
1921 fprintf (dump_file,
1922 "\nConsidering %s/%i with %i size\n",
1923 callee->name (), callee->order,
1924 inline_summaries->get (callee)->size);
1925 fprintf (dump_file,
1926 " to be inlined into %s/%i in %s:%i\n"
1927 " Estimated badness is %f, frequency %.2f.\n",
1928 edge->caller->name (), edge->caller->order,
1929 edge->call_stmt
1930 && (LOCATION_LOCUS (gimple_location ((const gimple *)
1931 edge->call_stmt))
1932 > BUILTINS_LOCATION)
1933 ? gimple_filename ((const gimple *) edge->call_stmt)
1934 : "unknown",
1935 edge->call_stmt
1936 ? gimple_lineno ((const gimple *) edge->call_stmt)
1937 : -1,
1938 badness.to_double (),
1939 edge->frequency / (double)CGRAPH_FREQ_BASE);
1940 if (edge->count)
1941 fprintf (dump_file," Called %" PRId64"x\n",
1942 edge->count);
1943 if (dump_flags & TDF_DETAILS)
1944 edge_badness (edge, true);
1947 if (overall_size + growth > max_size
1948 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1950 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1951 report_inline_failed_reason (edge);
1952 resolve_noninline_speculation (&edge_heap, edge);
1953 continue;
1956 if (!want_inline_small_function_p (edge, true))
1958 resolve_noninline_speculation (&edge_heap, edge);
1959 continue;
1962 /* Heuristics for inlining small functions work poorly for
1963 recursive calls, where the effect is similar to loop unrolling.
1964 When inlining such an edge seems profitable, leave the decision
1965 to the special-purpose recursive inliner. */
1966 if (edge->recursive_p ())
1968 where = edge->caller;
1969 if (where->global.inlined_to)
1970 where = where->global.inlined_to;
1971 if (!recursive_inlining (edge,
1972 opt_for_fn (edge->caller->decl,
1973 flag_indirect_inlining)
1974 ? &new_indirect_edges : NULL))
1976 edge->inline_failed = CIF_RECURSIVE_INLINING;
1977 resolve_noninline_speculation (&edge_heap, edge);
1978 continue;
1980 reset_edge_caches (where);
1981 /* The recursive inliner inlines all recursive calls of the function
1982 at once. Consequently we need to update all callee keys. */
1983 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
1984 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
1985 update_callee_keys (&edge_heap, where, updated_nodes);
1986 bitmap_clear (updated_nodes);
1988 else
1990 struct cgraph_node *outer_node = NULL;
1991 int depth = 0;
1993 /* Consider the case where a self-recursive function A is inlined
1994 into B. This is a desirable optimization in some cases, since it
1995 has an effect similar to loop peeling and we might completely
1996 optimize out the recursive call. However, we must be extra
1997 selective. */
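/* For example, if the self-recursive function A has already been peeled
   twice into B, the chain of inline clones is

     B  <-  A' (clone)  <-  A'' (clone)  <-  current edge back to A

   and the walk below finds outer_node == A with depth == 2;
   want_inline_self_recursive_call_p then decides whether one more
   level of peeling is worthwhile.  */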
1999 where = edge->caller;
2000 while (where->global.inlined_to)
2002 if (where->decl == callee->decl)
2003 outer_node = where, depth++;
2004 where = where->callers->caller;
2006 if (outer_node
2007 && !want_inline_self_recursive_call_p (edge, outer_node,
2008 true, depth))
2010 edge->inline_failed
2011 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
2012 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
2013 resolve_noninline_speculation (&edge_heap, edge);
2014 continue;
2016 else if (depth && dump_file)
2017 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
2019 gcc_checking_assert (!callee->global.inlined_to);
2020 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
2021 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2023 reset_edge_caches (edge->callee);
2025 update_callee_keys (&edge_heap, where, updated_nodes);
2027 where = edge->caller;
2028 if (where->global.inlined_to)
2029 where = where->global.inlined_to;
2031 /* Our profitability metric can depend on local properties
2032 such as the number of inlinable calls and the size of the function body.
2033 After inlining these properties might change for the function we
2034 inlined into (since its body size changed) and for the functions
2035 called by the function we inlined (since the number of their inlinable
2036 callers might change). */
2037 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
2038 /* The offline copy count has possibly changed; recompute if a profile is
2039 available. */
2040 if (max_count)
2042 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
2043 if (n != edge->callee && n->analyzed)
2044 update_callee_keys (&edge_heap, n, updated_nodes);
2046 bitmap_clear (updated_nodes);
2048 if (dump_file)
2050 fprintf (dump_file,
2051 " Inlined %s into %s which now has time %f and size %i, "
2052 "net change of %+i.\n",
2053 edge->callee->name (),
2054 edge->caller->name (),
2055 inline_summaries->get (edge->caller)->time.to_double (),
2056 inline_summaries->get (edge->caller)->size,
2057 overall_size - old_size);
2059 if (min_size > overall_size)
2061 min_size = overall_size;
2062 max_size = compute_max_insns (min_size);
2064 if (dump_file)
2065 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
2069 free_growth_caches ();
2070 if (dump_file)
2071 fprintf (dump_file,
2072 "Unit growth for small function inlining: %i->%i (%i%%)\n",
2073 initial_size, overall_size,
2074 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
2075 BITMAP_FREE (updated_nodes);
2076 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
2079 /* Flatten NODE. Performed both during early inlining and
2080 at IPA inlining time. */
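/* In outline, flattening is the following recursion over callee edges
   (informal pseudo-code with hypothetical helpers; the real
   implementation below additionally checks inlinability, SSA state and
   recursion, and handles clones):

     void flatten (node *n)
     {
       n->visiting = true;                 cycle guard; node->aux here
       for (edge *e : n->callees)
         {
           if (e->callee->visiting)        hit a cycle: refuse this edge
             continue;
           if (!already_inlined (e))
             inline_edge (e);
           flatten (e->callee);            flatten the inline copy too
         }
       n->visiting = false;
     }
*/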
2082 static void
2083 flatten_function (struct cgraph_node *node, bool early)
2085 struct cgraph_edge *e;
2087 /* We shouldn't be called recursively when we are being processed. */
2088 gcc_assert (node->aux == NULL);
2090 node->aux = (void *) node;
2092 for (e = node->callees; e; e = e->next_callee)
2094 struct cgraph_node *orig_callee;
2095 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2097 /* Have we hit a cycle? Then it is time to give up. */
2098 if (callee->aux)
2100 if (dump_file)
2101 fprintf (dump_file,
2102 "Not inlining %s into %s to avoid cycle.\n",
2103 xstrdup_for_dump (callee->name ()),
2104 xstrdup_for_dump (e->caller->name ()));
2105 e->inline_failed = CIF_RECURSIVE_INLINING;
2106 continue;
2109 /* When the edge is already inlined, we just need to recurse into
2110 it in order to fully flatten the leaves. */
2111 if (!e->inline_failed)
2113 flatten_function (callee, early);
2114 continue;
2117 /* The flatten attribute needs to be processed during late inlining. For
2118 extra code quality, however, we do flattening during early optimization
2119 too. */
2120 if (!early
2121 ? !can_inline_edge_p (e, true)
2122 : !can_early_inline_edge_p (e))
2123 continue;
2125 if (e->recursive_p ())
2127 if (dump_file)
2128 fprintf (dump_file, "Not inlining: recursive call.\n");
2129 continue;
2132 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2133 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2135 if (dump_file)
2136 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
2137 continue;
2140 /* Inline the edge and flatten the inline clone. Avoid
2141 recursing through the original node if the node was cloned. */
2142 if (dump_file)
2143 fprintf (dump_file, " Inlining %s into %s.\n",
2144 xstrdup_for_dump (callee->name ()),
2145 xstrdup_for_dump (e->caller->name ()));
2146 orig_callee = callee;
2147 inline_call (e, true, NULL, NULL, false);
2148 if (e->callee != orig_callee)
2149 orig_callee->aux = (void *) node;
2150 flatten_function (e->callee, early);
2151 if (e->callee != orig_callee)
2152 orig_callee->aux = NULL;
2155 node->aux = NULL;
2156 if (!node->global.inlined_to)
2157 inline_update_overall_summary (node);
2160 /* Inline NODE into all callers. Worker for cgraph_for_node_and_aliases.
2161 DATA points to the number of calls originally found, so we avoid infinite
2162 recursion. */
2164 static bool
2165 inline_to_all_callers_1 (struct cgraph_node *node, void *data,
2166 hash_set<cgraph_node *> *callers)
2168 int *num_calls = (int *)data;
2169 bool callee_removed = false;
2171 while (node->callers && !node->global.inlined_to)
2173 struct cgraph_node *caller = node->callers->caller;
2175 if (!can_inline_edge_p (node->callers, true)
2176 || node->callers->recursive_p ())
2178 if (dump_file)
2179 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2180 *num_calls = 0;
2181 return false;
2184 if (dump_file)
2186 fprintf (dump_file,
2187 "\nInlining %s size %i.\n",
2188 node->name (),
2189 inline_summaries->get (node)->size);
2190 fprintf (dump_file,
2191 " Called once from %s %i insns.\n",
2192 node->callers->caller->name (),
2193 inline_summaries->get (node->callers->caller)->size);
2196 /* Remember which callers we inlined to, delaying updating the
2197 overall summary. */
2198 callers->add (node->callers->caller);
2199 inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
2200 if (dump_file)
2201 fprintf (dump_file,
2202 " Inlined into %s which now has %i size\n",
2203 caller->name (),
2204 inline_summaries->get (caller)->size);
2205 if (!(*num_calls)--)
2207 if (dump_file)
2208 fprintf (dump_file, "New calls found; giving up.\n");
2209 return callee_removed;
2211 if (callee_removed)
2212 return true;
2214 return false;
2217 /* Wrapper around inline_to_all_callers_1 doing delayed overall summary
2218 update. */
2220 static bool
2221 inline_to_all_callers (struct cgraph_node *node, void *data)
2223 hash_set<cgraph_node *> callers;
2224 bool res = inline_to_all_callers_1 (node, data, &callers);
2225 /* Perform the delayed update of the overall summary of all callers
2226 processed. This avoids quadratic behavior in the cases where
2227 we have a lot of calls to the same function. */
2228 for (hash_set<cgraph_node *>::iterator i = callers.begin ();
2229 i != callers.end (); ++i)
2230 inline_update_overall_summary (*i);
2231 return res;
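/* The delayed update above follows a common batching pattern: record the
   affected callers in a set while the callgraph is being mutated, then
   run the expensive summary recomputation once per caller.  A stand-alone
   sketch with hypothetical names:

     std::unordered_set<node *> dirty;
     for (edge *e : calls_to_inline)
       {
         dirty.insert (e->caller);         cheap per-edge bookkeeping
         do_inline (e);
       }
     for (node *n : dirty)
       recompute_summary (n);              expensive, once per caller

   With K calls from the same caller this performs one summary update
   instead of K.  */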
2234 /* Output overall time estimate. */
2235 static void
2236 dump_overall_stats (void)
2238 sreal sum_weighted = 0, sum = 0;
2239 struct cgraph_node *node;
2241 FOR_EACH_DEFINED_FUNCTION (node)
2242 if (!node->global.inlined_to
2243 && !node->alias)
2245 sreal time = inline_summaries->get (node)->time;
2246 sum += time;
2247 sum_weighted += time * node->count;
2249 fprintf (dump_file, "Overall time estimate: "
2250 "%f weighted by profile: "
2251 "%f\n", sum.to_double (), sum_weighted.to_double ());
2254 /* Output some useful stats about inlining. */
2256 static void
2257 dump_inline_stats (void)
2259 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2260 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2261 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2262 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2263 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2264 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2265 int64_t reason[CIF_N_REASONS][3];
2266 int i;
2267 struct cgraph_node *node;
2269 memset (reason, 0, sizeof (reason));
2270 FOR_EACH_DEFINED_FUNCTION (node)
2272 struct cgraph_edge *e;
2273 for (e = node->callees; e; e = e->next_callee)
2275 if (e->inline_failed)
2277 reason[(int) e->inline_failed][0] += e->count;
2278 reason[(int) e->inline_failed][1] += e->frequency;
2279 reason[(int) e->inline_failed][2] ++;
2280 if (DECL_VIRTUAL_P (e->callee->decl))
2282 if (e->indirect_inlining_edge)
2283 noninlined_virt_indir_cnt += e->count;
2284 else
2285 noninlined_virt_cnt += e->count;
2287 else
2289 if (e->indirect_inlining_edge)
2290 noninlined_indir_cnt += e->count;
2291 else
2292 noninlined_cnt += e->count;
2295 else
2297 if (e->speculative)
2299 if (DECL_VIRTUAL_P (e->callee->decl))
2300 inlined_speculative_ply += e->count;
2301 else
2302 inlined_speculative += e->count;
2304 else if (DECL_VIRTUAL_P (e->callee->decl))
2306 if (e->indirect_inlining_edge)
2307 inlined_virt_indir_cnt += e->count;
2308 else
2309 inlined_virt_cnt += e->count;
2311 else
2313 if (e->indirect_inlining_edge)
2314 inlined_indir_cnt += e->count;
2315 else
2316 inlined_cnt += e->count;
2320 for (e = node->indirect_calls; e; e = e->next_callee)
2321 if (e->indirect_info->polymorphic)
2322 indirect_poly_cnt += e->count;
2323 else
2324 indirect_cnt += e->count;
2326 if (max_count)
2328 fprintf (dump_file,
2329 "Inlined %" PRId64 " + speculative "
2330 "%" PRId64 " + speculative polymorphic "
2331 "%" PRId64 " + previously indirect "
2332 "%" PRId64 " + virtual "
2333 "%" PRId64 " + virtual and previously indirect "
2334 "%" PRId64 "\n" "Not inlined "
2335 "%" PRId64 " + previously indirect "
2336 "%" PRId64 " + virtual "
2337 "%" PRId64 " + virtual and previously indirect "
2338 "%" PRId64 " + stil indirect "
2339 "%" PRId64 " + still indirect polymorphic "
2340 "%" PRId64 "\n", inlined_cnt,
2341 inlined_speculative, inlined_speculative_ply,
2342 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2343 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2344 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2345 fprintf (dump_file,
2346 "Removed speculations %" PRId64 "\n",
2347 spec_rem);
2349 dump_overall_stats ();
2350 fprintf (dump_file, "\nWhy inlining failed?\n");
2351 for (i = 0; i < CIF_N_REASONS; i++)
2352 if (reason[i][2])
2353 fprintf (dump_file, "%-50s: %8i calls, %8i freq, %" PRId64" count\n",
2354 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2355 (int) reason[i][2], (int) reason[i][1], reason[i][0]);
2358 /* Decide on the inlining. We do so in topological order to avoid
2359 the expense of updating data structures. */
2361 static unsigned int
2362 ipa_inline (void)
2364 struct cgraph_node *node;
2365 int nnodes;
2366 struct cgraph_node **order;
2367 int i;
2368 int cold;
2369 bool remove_functions = false;
2371 if (!optimize)
2372 return 0;
2374 cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
2375 percent_rec = (sreal) 1 / (sreal) 100;
2377 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2379 if (in_lto_p && optimize)
2380 ipa_update_after_lto_read ();
2382 if (dump_file)
2383 dump_inline_summaries (dump_file);
2385 nnodes = ipa_reverse_postorder (order);
2387 FOR_EACH_FUNCTION (node)
2389 node->aux = 0;
2391 /* Recompute the default reasons for inlining because they may have
2392 changed during merging. */
2393 if (in_lto_p)
2395 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2397 gcc_assert (e->inline_failed);
2398 initialize_inline_failed (e);
2400 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2401 initialize_inline_failed (e);
2405 if (dump_file)
2406 fprintf (dump_file, "\nFlattening functions:\n");
2408 /* In the first pass handle functions to be flattened. Do this first
2409 so that none of our later choices makes this impossible. */
2410 for (i = nnodes - 1; i >= 0; i--)
2412 node = order[i];
2414 /* Handle nodes to be flattened.
2415 Ideally, when processing callees we would stop inlining at the
2416 entry of cycles, possibly cloning that entry point and
2417 trying to flatten it, turning it into a self-recursive
2418 function. */
2419 if (lookup_attribute ("flatten",
2420 DECL_ATTRIBUTES (node->decl)) != NULL)
2422 if (dump_file)
2423 fprintf (dump_file,
2424 "Flattening %s\n", node->name ());
2425 flatten_function (node, false);
2428 if (dump_file)
2429 dump_overall_stats ();
2431 inline_small_functions ();
2433 gcc_assert (symtab->state == IPA_SSA);
2434 symtab->state = IPA_SSA_AFTER_INLINING;
2435 /* Do the first after-inlining removal. We want to remove all "stale" extern
2436 inline functions and virtual functions so we really know what is called
2437 once. */
2438 symtab->remove_unreachable_nodes (dump_file);
2439 free (order);
2441 /* Inline functions with the property that, after inlining into all callers,
2442 the code size will shrink because the out-of-line copy is eliminated.
2443 We do this regardless of the callee size as long as the function growth
2444 limits are met. */
2445 if (dump_file)
2446 fprintf (dump_file,
2447 "\nDeciding on functions to be inlined into all callers and "
2448 "removing useless speculations:\n");
2450 /* Inlining one function called once has a good chance of preventing
2451 the inlining of another function into the same callee. Ideally we should
2452 work in priority order, but inlining hot functions first is probably
2453 a good cut without the extra pain of maintaining the queue.
2455 ??? This is not really fitting the bill perfectly: inlining a function
2456 into a callee often leads to better optimization of the callee due to
2457 the increased context for optimization.
2458 For example, if main() calls a function that outputs help
2459 and then a function that does the main optimization, we should inline
2460 the second with priority even if both calls are cold by themselves.
2462 We probably want to implement a new predicate replacing our use of
2463 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2464 to be hot. */
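/* Concretely, the loop below therefore makes two passes: the first
   iteration (cold == 0) only considers candidates whose calls are hot,
   while the second (cold == 1) also admits the remaining cold candidates
   that still satisfy want_inline_function_to_all_callers_p.  */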
2465 for (cold = 0; cold <= 1; cold ++)
2467 FOR_EACH_DEFINED_FUNCTION (node)
2469 struct cgraph_edge *edge, *next;
2470 bool update=false;
2472 for (edge = node->callees; edge; edge = next)
2474 next = edge->next_callee;
2475 if (edge->speculative && !speculation_useful_p (edge, false))
2477 edge->resolve_speculation ();
2478 spec_rem += edge->count;
2479 update = true;
2480 remove_functions = true;
2483 if (update)
2485 struct cgraph_node *where = node->global.inlined_to
2486 ? node->global.inlined_to : node;
2487 reset_edge_caches (where);
2488 inline_update_overall_summary (where);
2490 if (want_inline_function_to_all_callers_p (node, cold))
2492 int num_calls = 0;
2493 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2494 true);
2495 while (node->call_for_symbol_and_aliases
2496 (inline_to_all_callers, &num_calls, true))
2498 remove_functions = true;
2503 /* Free ipa-prop structures if they are no longer needed. */
2504 if (optimize)
2505 ipa_free_all_structures_after_iinln ();
2507 if (dump_file)
2509 fprintf (dump_file,
2510 "\nInlined %i calls, eliminated %i functions\n\n",
2511 ncalls_inlined, nfunctions_inlined);
2512 dump_inline_stats ();
2515 if (dump_file)
2516 dump_inline_summaries (dump_file);
2517 /* In WPA we use inline summaries for the partitioning process. */
2518 if (!flag_wpa)
2519 inline_free_summary ();
2520 return remove_functions ? TODO_remove_functions : 0;
2523 /* Inline always-inline function calls in NODE. */
2525 static bool
2526 inline_always_inline_functions (struct cgraph_node *node)
2528 struct cgraph_edge *e;
2529 bool inlined = false;
2531 for (e = node->callees; e; e = e->next_callee)
2533 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2534 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2535 continue;
2537 if (e->recursive_p ())
2539 if (dump_file)
2540 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2541 e->callee->name ());
2542 e->inline_failed = CIF_RECURSIVE_INLINING;
2543 continue;
2546 if (!can_early_inline_edge_p (e))
2548 /* Set inlined to true if the callee is marked "always_inline" but
2549 is not inlinable. This will allow flagging an error later in
2550 expand_call_inline in tree-inline.c. */
2551 if (lookup_attribute ("always_inline",
2552 DECL_ATTRIBUTES (callee->decl)) != NULL)
2553 inlined = true;
2554 continue;
2557 if (dump_file)
2558 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2559 xstrdup_for_dump (e->callee->name ()),
2560 xstrdup_for_dump (e->caller->name ()));
2561 inline_call (e, true, NULL, NULL, false);
2562 inlined = true;
2564 if (inlined)
2565 inline_update_overall_summary (node);
2567 return inlined;
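/* As a user-level illustration of what the loop above handles: a callee
   declared for instance as

     static inline int add1 (int x) __attribute__ ((always_inline));

   maps to DECL_DISREGARD_INLINE_LIMITS and must be inlined into every
   caller regardless of the size limits; the only edges refused here are
   recursive calls and calls that cannot be early-inlined at all (the
   latter are diagnosed later in expand_call_inline, as noted above).
   Illustrative declaration only.  */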
2570 /* Decide on the inlining. We do so in topological order to avoid
2571 the expense of updating data structures. */
2573 static bool
2574 early_inline_small_functions (struct cgraph_node *node)
2576 struct cgraph_edge *e;
2577 bool inlined = false;
2579 for (e = node->callees; e; e = e->next_callee)
2581 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2582 if (!inline_summaries->get (callee)->inlinable
2583 || !e->inline_failed)
2584 continue;
2586 /* Do not consider functions not declared inline unless
2587 -finline-small-functions or -finline-functions is enabled for the caller. */
2587 if (!DECL_DECLARED_INLINE_P (callee->decl)
2588 && !opt_for_fn (node->decl, flag_inline_small_functions)
2589 && !opt_for_fn (node->decl, flag_inline_functions))
2590 continue;
2592 if (dump_file)
2593 fprintf (dump_file, "Considering inline candidate %s.\n",
2594 callee->name ());
2596 if (!can_early_inline_edge_p (e))
2597 continue;
2599 if (e->recursive_p ())
2601 if (dump_file)
2602 fprintf (dump_file, " Not inlining: recursive call.\n");
2603 continue;
2606 if (!want_early_inline_function_p (e))
2607 continue;
2609 if (dump_file)
2610 fprintf (dump_file, " Inlining %s into %s.\n",
2611 xstrdup_for_dump (callee->name ()),
2612 xstrdup_for_dump (e->caller->name ()));
2613 inline_call (e, true, NULL, NULL, false);
2614 inlined = true;
2617 if (inlined)
2618 inline_update_overall_summary (node);
2620 return inlined;
2623 unsigned int
2624 early_inliner (function *fun)
2626 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2627 struct cgraph_edge *edge;
2628 unsigned int todo = 0;
2629 int iterations = 0;
2630 bool inlined = false;
2632 if (seen_error ())
2633 return 0;
2635 /* Do nothing if the data structures for the ipa-inliner are already computed.
2636 This happens when some pass decides to construct a new function and
2637 cgraph_add_new_function calls lowering passes and early optimization on
2638 it. This may confuse us when the early inliner decides to inline a call to
2639 a function clone, because function clones don't have a parameter list in
2640 ipa-prop matching their signature. */
2641 if (ipa_node_params_sum)
2642 return 0;
2644 if (flag_checking)
2645 node->verify ();
2646 node->remove_all_references ();
2648 /* Rebuild this reference because it doesn't depend on the
2649 function's body and it's required to pass cgraph_node
2650 verification. */
2651 if (node->instrumented_version
2652 && !node->instrumentation_clone)
2653 node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
2655 /* Even when not optimizing or not inlining, inline always-inline
2656 functions. */
2657 inlined = inline_always_inline_functions (node);
2659 if (!optimize
2660 || flag_no_inline
2661 || !flag_early_inlining
2662 /* Never inline regular functions into always-inline functions
2663 during incremental inlining. This is unfortunate, as functions
2664 calling always-inline functions will get less optimized, but at the
2665 same time inlining a function that calls an always-inline
2666 function into an always-inline function might introduce
2667 cycles of edges to be always inlined in the callgraph.
2669 We might want to be smarter and just avoid this type of inlining. */
2670 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2671 && lookup_attribute ("always_inline",
2672 DECL_ATTRIBUTES (node->decl))))
2674 else if (lookup_attribute ("flatten",
2675 DECL_ATTRIBUTES (node->decl)) != NULL)
2677 /* When the function is marked to be flattened, recursively inline
2678 all calls in it. */
2679 if (dump_file)
2680 fprintf (dump_file,
2681 "Flattening %s\n", node->name ());
2682 flatten_function (node, true);
2683 inlined = true;
2685 else
2687 /* If some always_inline functions were inlined, apply the changes.
2688 This way we will not account always-inline functions in the growth
2689 limits, and moreover we will inline calls from always-inlines that we
2690 skipped previously because of the conditional above. */
2691 if (inlined)
2693 timevar_push (TV_INTEGRATION);
2694 todo |= optimize_inline_calls (current_function_decl);
2695 /* The optimize_inline_calls call above might have introduced new
2696 statements that don't have their inline parameters computed. */
2697 for (edge = node->callees; edge; edge = edge->next_callee)
2699 if (inline_edge_summary_vec.length () > (unsigned) edge->uid)
2701 struct inline_edge_summary *es = inline_edge_summary (edge);
2702 es->call_stmt_size
2703 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2704 es->call_stmt_time
2705 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2708 inline_update_overall_summary (node);
2709 inlined = false;
2710 timevar_pop (TV_INTEGRATION);
2712 /* We iterate incremental inlining to catch trivial cases of indirect
2713 inlining; see the example below. */
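/* A typical case: inlining a small wrapper such as

     static int apply (int (*fn) (int), int v) { return fn (v); }

   into a caller that passes a known function turns the indirect call
   into a direct one, which the next iteration can then inline in turn.
   Illustrative example only.  */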
2714 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2715 && early_inline_small_functions (node))
2717 timevar_push (TV_INTEGRATION);
2718 todo |= optimize_inline_calls (current_function_decl);
2720 /* Technically we ought to recompute inline parameters so the new
2721 iteration of the early inliner works as expected. We, however, have
2722 values that are approximately right, and thus we only need to update
2723 the edge info that might be cleared out for newly discovered edges. */
2724 for (edge = node->callees; edge; edge = edge->next_callee)
2726 /* We have no summary for new bound store calls yet. */
2727 if (inline_edge_summary_vec.length () > (unsigned)edge->uid)
2729 struct inline_edge_summary *es = inline_edge_summary (edge);
2730 es->call_stmt_size
2731 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2732 es->call_stmt_time
2733 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2735 if (edge->callee->decl
2736 && !gimple_check_call_matching_types (
2737 edge->call_stmt, edge->callee->decl, false))
2739 edge->inline_failed = CIF_MISMATCHED_ARGUMENTS;
2740 edge->call_stmt_cannot_inline_p = true;
2743 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2744 inline_update_overall_summary (node);
2745 timevar_pop (TV_INTEGRATION);
2746 iterations++;
2747 inlined = false;
2749 if (dump_file)
2750 fprintf (dump_file, "Iterations: %i\n", iterations);
2753 if (inlined)
2755 timevar_push (TV_INTEGRATION);
2756 todo |= optimize_inline_calls (current_function_decl);
2757 timevar_pop (TV_INTEGRATION);
2760 fun->always_inline_functions_inlined = true;
2762 return todo;
2765 /* Do inlining of small functions. Doing so early helps profiling and other
2766 passes to be somewhat more effective and avoids some code duplication in
2767 the later real inlining pass for testcases with very many function calls. */
2769 namespace {
2771 const pass_data pass_data_early_inline =
2773 GIMPLE_PASS, /* type */
2774 "einline", /* name */
2775 OPTGROUP_INLINE, /* optinfo_flags */
2776 TV_EARLY_INLINING, /* tv_id */
2777 PROP_ssa, /* properties_required */
2778 0, /* properties_provided */
2779 0, /* properties_destroyed */
2780 0, /* todo_flags_start */
2781 0, /* todo_flags_finish */
2784 class pass_early_inline : public gimple_opt_pass
2786 public:
2787 pass_early_inline (gcc::context *ctxt)
2788 : gimple_opt_pass (pass_data_early_inline, ctxt)
2791 /* opt_pass methods: */
2792 virtual unsigned int execute (function *);
2794 }; // class pass_early_inline
2796 unsigned int
2797 pass_early_inline::execute (function *fun)
2799 return early_inliner (fun);
2802 } // anon namespace
2804 gimple_opt_pass *
2805 make_pass_early_inline (gcc::context *ctxt)
2807 return new pass_early_inline (ctxt);
2810 namespace {
2812 const pass_data pass_data_ipa_inline =
2814 IPA_PASS, /* type */
2815 "inline", /* name */
2816 OPTGROUP_INLINE, /* optinfo_flags */
2817 TV_IPA_INLINING, /* tv_id */
2818 0, /* properties_required */
2819 0, /* properties_provided */
2820 0, /* properties_destroyed */
2821 0, /* todo_flags_start */
2822 ( TODO_dump_symtab ), /* todo_flags_finish */
2825 class pass_ipa_inline : public ipa_opt_pass_d
2827 public:
2828 pass_ipa_inline (gcc::context *ctxt)
2829 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2830 inline_generate_summary, /* generate_summary */
2831 inline_write_summary, /* write_summary */
2832 inline_read_summary, /* read_summary */
2833 NULL, /* write_optimization_summary */
2834 NULL, /* read_optimization_summary */
2835 NULL, /* stmt_fixup */
2836 0, /* function_transform_todo_flags_start */
2837 inline_transform, /* function_transform */
2838 NULL) /* variable_transform */
2841 /* opt_pass methods: */
2842 virtual unsigned int execute (function *) { return ipa_inline (); }
2844 }; // class pass_ipa_inline
2846 } // anon namespace
2848 ipa_opt_pass_d *
2849 make_pass_ipa_inline (gcc::context *ctxt)
2851 return new pass_ipa_inline (ctxt);