1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Inlining decision heuristics
23 The implementation of the inliner is organized as follows:
25 inlining heuristics limits
27 can_inline_edge_p allows one to check that a particular inlining is allowed
28 by the limits specified by the user (allowed function body growth, stack
29 growth and so on).
31 Functions are inlined when it is obvious the result is profitable (such
32 as functions called once or when inlining reduces code size).
33 In addition to that we perform inlining of small functions and recursive
34 inlining.
36 inlining heuristics
38 The inliner itself is split into two passes:
40 pass_early_inlining
42 A simple local inlining pass that inlines callees into the current function.
43 This pass makes no use of whole-unit analysis and thus it can make only
44 very simple decisions based on local properties.
46 The strength of the pass is that it is run in topological order
47 (reverse postorder) on the callgraph. Functions are converted into SSA
48 form just before this pass and optimized subsequently. As a result, the
49 callees of the function seen by the early inliner have already been
50 optimized, and the results of early inlining add a lot of optimization
51 opportunities for the local optimization.
53 The pass handles the obvious inlining decisions within the compilation
54 unit - inlining auto-inline functions, inlining for size and
55 flattening.
57 The main strength of the pass is the ability to eliminate the abstraction
58 penalty in C++ code (via a combination of inlining and early
59 optimization) and thus improve the quality of analysis done by the real
60 IPA optimizers.
62 Because of the lack of whole-unit knowledge, the pass cannot really make
63 good code size/performance tradeoffs. It does, however, do very simple
64 speculative inlining, allowing code size to grow by
65 EARLY_INLINING_INSNS when the callee is a leaf function. In this case the
66 optimizations performed later are very likely to eliminate the cost.
68 pass_ipa_inline
70 This is the real inliner, able to handle inlining with whole-program
71 knowledge. It performs the following steps:
73 1) inlining of small functions. This is implemented by a greedy
74 algorithm ordering all inlinable cgraph edges by their badness and
75 inlining them in this order as long as the inline limits allow doing so.
77 This heuristic is not very good at inlining recursive calls. Recursive
78 calls can be inlined with results similar to loop unrolling. To do so,
79 a special-purpose recursive inliner is executed on the function when a
80 recursive edge is met as a viable candidate.
82 2) Unreachable functions are removed from the callgraph. Inlining leads
83 to devirtualization and other modifications of the callgraph, so functions
84 may become unreachable during the process. Also functions declared as
85 extern inline or virtual functions are removed, since after inlining
86 we no longer need the offline bodies.
88 3) Functions called once and not exported from the unit are inlined.
89 This should almost always lead to a reduction of code size by eliminating
90 the need for an offline copy of the function. */
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "tm.h"
96 #include "input.h"
97 #include "alias.h"
98 #include "symtab.h"
99 #include "tree.h"
100 #include "fold-const.h"
101 #include "trans-mem.h"
102 #include "calls.h"
103 #include "tree-inline.h"
104 #include "langhooks.h"
105 #include "flags.h"
106 #include "diagnostic.h"
107 #include "gimple-pretty-print.h"
108 #include "params.h"
109 #include "intl.h"
110 #include "tree-pass.h"
111 #include "coverage.h"
112 #include "rtl.h"
113 #include "bitmap.h"
114 #include "profile.h"
115 #include "predict.h"
116 #include "hard-reg-set.h"
117 #include "input.h"
118 #include "function.h"
119 #include "basic-block.h"
120 #include "tree-ssa-alias.h"
121 #include "internal-fn.h"
122 #include "gimple-expr.h"
123 #include "is-a.h"
124 #include "gimple.h"
125 #include "gimple-ssa.h"
126 #include "plugin-api.h"
127 #include "ipa-ref.h"
128 #include "cgraph.h"
129 #include "alloc-pool.h"
130 #include "symbol-summary.h"
131 #include "ipa-prop.h"
132 #include "except.h"
133 #include "target.h"
134 #include "ipa-inline.h"
135 #include "ipa-utils.h"
136 #include "sreal.h"
137 #include "auto-profile.h"
138 #include "builtins.h"
139 #include "fibonacci_heap.h"
140 #include "lto-streamer.h"
142 typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
143 typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
145 /* Statistics we collect about inlining algorithm. */
146 static int overall_size;
147 static gcov_type max_count;
148 static gcov_type spec_rem;
150 /* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100. */
151 static sreal cgraph_freq_base_rec, percent_rec;
153 /* Return false when inlining edge E would lead to violating
154 limits on function unit growth or stack usage growth.
156 The relative function body growth limit is present generally
157 to avoid problems with non-linear behavior of the compiler.
158 To allow inlining huge functions into tiny wrappers, the limit
159 is always based on the bigger of the two functions considered.
161 For stack growth limits we always base the growth on the stack usage
162 of the caller. We want to prevent applications from segfaulting
163 on stack overflow when functions with huge stack frames get
164 inlined. */
166 static bool
167 caller_growth_limits (struct cgraph_edge *e)
169 struct cgraph_node *to = e->caller;
170 struct cgraph_node *what = e->callee->ultimate_alias_target ();
171 int newsize;
172 int limit = 0;
173 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
174 inline_summary *info, *what_info, *outer_info = inline_summaries->get (to);
176 /* Look for the function e->caller is inlined into. While doing
177 so, work out the largest function body on the way. As
178 described above, we want to base our function growth
179 limits on that, not on the self size of the
180 outer function and not on the self size of the inline code
181 we immediately inline into. This is the most relaxed
182 interpretation of the rule "do not grow large functions
183 too much in order to prevent the compiler from exploding". */
184 while (true)
186 info = inline_summaries->get (to);
187 if (limit < info->self_size)
188 limit = info->self_size;
189 if (stack_size_limit < info->estimated_self_stack_size)
190 stack_size_limit = info->estimated_self_stack_size;
191 if (to->global.inlined_to)
192 to = to->callers->caller;
193 else
194 break;
197 what_info = inline_summaries->get (what);
199 if (limit < what_info->self_size)
200 limit = what_info->self_size;
202 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
204 /* Check the size after inlining against the function limits. But allow
205 the function to shrink if it went over the limits by forced inlining. */
206 newsize = estimate_size_after_inlining (to, e);
207 if (newsize >= info->size
208 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
209 && newsize > limit)
211 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
212 return false;
215 if (!what_info->estimated_stack_size)
216 return true;
218 /* FIXME: Stack size limit often prevents inlining in Fortran programs
219 due to large I/O data structures used by the Fortran front-end.
220 We ought to ignore this limit when we know that the edge is executed
221 on every invocation of the caller (i.e. its call statement dominates
222 the exit block). We do not track this information yet. */
223 stack_size_limit += ((gcov_type)stack_size_limit
224 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
226 inlined_stack = (outer_info->stack_frame_offset
227 + outer_info->estimated_self_stack_size
228 + what_info->estimated_stack_size);
229 /* Check new stack consumption with stack consumption at the place
230 stack is used. */
231 if (inlined_stack > stack_size_limit
232 /* If the function already has large stack usage from a sibling
233 inline call, we can inline, too.
234 This bit over-optimistically assumes that we are good at stack
235 packing. */
236 && inlined_stack > info->estimated_stack_size
237 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
239 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
240 return false;
242 return true;
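/* A worked example of the growth limit computed above, with hypothetical
   numbers: if the largest body on the inline chain has self_size 500, the
   callee's self_size is 300 and --param large-function-growth is 100, then
   limit = 500 and afterwards limit = 500 + 500 * 100 / 100 = 1000; the
   inlining is rejected only when the estimated size exceeds this limit,
   exceeds --param large-function-insns and does not shrink the function.
   The stack check above follows the same pattern with
   --param stack-frame-growth and --param large-stack-frame.  */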
245 /* Dump info about why inlining has failed. */
247 static void
248 report_inline_failed_reason (struct cgraph_edge *e)
250 if (dump_file)
252 fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
253 xstrdup_for_dump (e->caller->name ()), e->caller->order,
254 xstrdup_for_dump (e->callee->name ()), e->callee->order,
255 cgraph_inline_failed_string (e->inline_failed));
256 if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
257 || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
258 && e->caller->lto_file_data
259 && e->callee->function_symbol ()->lto_file_data)
261 fprintf (dump_file, " LTO objects: %s, %s\n",
262 e->caller->lto_file_data->file_name,
263 e->callee->function_symbol ()->lto_file_data->file_name);
265 if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
266 cl_target_option_print_diff
267 (dump_file, 2, target_opts_for_fn (e->caller->decl),
268 target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
269 if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
270 cl_optimization_print_diff
271 (dump_file, 2, opts_for_fn (e->caller->decl),
272 opts_for_fn (e->callee->ultimate_alias_target ()->decl));
276 /* Decide whether sanitizer-related attributes allow inlining. */
278 static bool
279 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
281 /* Don't care if sanitizer is disabled */
282 if (!(flag_sanitize & SANITIZE_ADDRESS))
283 return true;
285 if (!caller || !callee)
286 return true;
288 return !!lookup_attribute ("no_sanitize_address",
289 DECL_ATTRIBUTES (caller)) ==
290 !!lookup_attribute ("no_sanitize_address",
291 DECL_ATTRIBUTES (callee));
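/* For instance, with -fsanitize=address a callee marked
   __attribute__((no_sanitize_address)) is treated as inlinable only into a
   caller that carries the same attribute (and an instrumented callee only
   into an instrumented caller); when the address sanitizer is off the
   attribute difference is ignored.  */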
294 /* Used for flags where it is safe to inline when the caller's value is
295 greater than the callee's. */
296 #define check_maybe_up(flag) \
297 (opts_for_fn (caller->decl)->x_##flag \
298 != opts_for_fn (callee->decl)->x_##flag \
299 && (!always_inline \
300 || opts_for_fn (caller->decl)->x_##flag \
301 < opts_for_fn (callee->decl)->x_##flag))
302 /* Used for flags where it is safe to inline when caller's value is
303 smaller than callee's. */
304 #define check_maybe_down(flag) \
305 (opts_for_fn (caller->decl)->x_##flag \
306 != opts_for_fn (callee->decl)->x_##flag \
307 && (!always_inline \
308 || opts_for_fn (caller->decl)->x_##flag \
309 > opts_for_fn (callee->decl)->x_##flag))
310 /* Used for flags where exact match is needed for correctness. */
311 #define check_match(flag) \
312 (opts_for_fn (caller->decl)->x_##flag \
313 != opts_for_fn (callee->decl)->x_##flag)
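/* As an illustration, check_maybe_up (flag_trapping_math) above expands to a
   test that is true, and therefore helps block the inlining, whenever caller
   and callee disagree on the flag, except that for an always_inline callee
   the mismatch is tolerated when the caller's value is the greater one.
   check_maybe_down is the mirror image and check_match rejects any
   difference.  */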
315 /* Decide if we can inline the edge and possibly update
316 the inline_failed reason.
317 We check whether inlining is possible at all and whether
318 caller growth limits allow doing so.
320 If REPORT is true, output the reason to the dump file.
322 If DISREGARD_LIMITS is true, ignore size limits. */
324 static bool
325 can_inline_edge_p (struct cgraph_edge *e, bool report,
326 bool disregard_limits = false, bool early = false)
328 gcc_checking_assert (e->inline_failed);
330 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
332 if (report)
333 report_inline_failed_reason (e);
334 return false;
337 bool inlinable = true;
338 enum availability avail;
339 cgraph_node *callee = e->callee->ultimate_alias_target (&avail);
340 cgraph_node *caller = e->caller->global.inlined_to
341 ? e->caller->global.inlined_to : e->caller;
342 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
343 tree callee_tree
344 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
346 if (!callee->definition)
348 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
349 inlinable = false;
351 else if (callee->calls_comdat_local)
353 e->inline_failed = CIF_USES_COMDAT_LOCAL;
354 inlinable = false;
356 else if (avail <= AVAIL_INTERPOSABLE)
358 e->inline_failed = CIF_OVERWRITABLE;
359 inlinable = false;
361 else if (e->call_stmt_cannot_inline_p)
363 if (e->inline_failed != CIF_FUNCTION_NOT_OPTIMIZED)
364 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
365 inlinable = false;
367 /* Don't inline if the functions have different EH personalities. */
368 else if (DECL_FUNCTION_PERSONALITY (caller->decl)
369 && DECL_FUNCTION_PERSONALITY (callee->decl)
370 && (DECL_FUNCTION_PERSONALITY (caller->decl)
371 != DECL_FUNCTION_PERSONALITY (callee->decl)))
373 e->inline_failed = CIF_EH_PERSONALITY;
374 inlinable = false;
376 /* TM pure functions should not be inlined into non-TM_pure
377 functions. */
378 else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
380 e->inline_failed = CIF_UNSPECIFIED;
381 inlinable = false;
383 /* Check compatibility of target optimization options. */
384 else if (!targetm.target_option.can_inline_p (caller->decl,
385 callee->decl))
387 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
388 inlinable = false;
390 else if (!inline_summaries->get (callee)->inlinable)
392 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
393 inlinable = false;
395 else if (inline_summaries->get (caller)->contains_cilk_spawn)
397 e->inline_failed = CIF_CILK_SPAWN;
398 inlinable = false;
400 /* Don't inline a function with mismatched sanitization attributes. */
401 else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
403 e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
404 inlinable = false;
406 /* Check if caller growth allows the inlining. */
407 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
408 && !disregard_limits
409 && !lookup_attribute ("flatten",
410 DECL_ATTRIBUTES (caller->decl))
411 && !caller_growth_limits (e))
412 inlinable = false;
413 /* Don't inline a function with a higher optimization level than the
414 caller. FIXME: this is really just the tip of the iceberg of handling
415 the optimization attribute. */
416 else if (caller_tree != callee_tree)
418 bool always_inline =
419 (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
420 && lookup_attribute ("always_inline",
421 DECL_ATTRIBUTES (callee->decl)));
423 /* Until GCC 4.9 we did not check the semantics-altering flags
424 below and inlined across the optimization boundary.
425 Enabling the checks below breaks several packages by refusing
426 to inline library always_inline functions. See PR65873.
427 Disable the check for early inlining for now until a better solution
428 is found. */
429 if (always_inline && early)
431 /* There are some options that change IL semantics which means
432 we cannot inline in these cases for correctness reasons,
433 not even for always_inline declared functions. */
434 /* Strictly speaking only when the callee contains signed integer
435 math where overflow is undefined. */
436 else if ((check_maybe_up (flag_strict_overflow)
437 /* this flag is set by optimize. Allow inlining across
438 optimize boundary. */
439 && (!opt_for_fn (caller->decl, optimize)
440 == !opt_for_fn (callee->decl, optimize) || !always_inline))
441 || check_match (flag_wrapv)
442 || check_match (flag_trapv)
443 /* Strictly speaking only when the callee uses FP math. */
444 || check_maybe_up (flag_rounding_math)
445 || check_maybe_up (flag_trapping_math)
446 || check_maybe_down (flag_unsafe_math_optimizations)
447 || check_maybe_down (flag_finite_math_only)
448 || check_maybe_up (flag_signaling_nans)
449 || check_maybe_down (flag_cx_limited_range)
450 || check_maybe_up (flag_signed_zeros)
451 || check_maybe_down (flag_associative_math)
452 || check_maybe_down (flag_reciprocal_math)
453 /* We do not want code compiled with exceptions to be
454 brought into a non-EH function unless we know that the callee
455 does not throw.
456 This is tracked by DECL_FUNCTION_PERSONALITY. */
457 || (check_match (flag_non_call_exceptions)
458 /* TODO: We also may allow bringing !flag_non_call_exceptions
459 to flag_non_call_exceptions function, but that may need
460 extra work in tree-inline to add the extra EH edges. */
461 && (!opt_for_fn (callee->decl, flag_non_call_exceptions)
462 || DECL_FUNCTION_PERSONALITY (callee->decl)))
463 || (check_maybe_up (flag_exceptions)
464 && DECL_FUNCTION_PERSONALITY (callee->decl))
465 /* Strictly speaking only when the callee contains function
466 calls that may end up setting errno. */
467 || check_maybe_up (flag_errno_math)
468 /* When devirtualization is disabled for the callee, it is not safe
469 to inline it as we have possibly mangled the type info.
470 Allow early inlining of always inlines. */
471 || (!early && check_maybe_down (flag_devirtualize)))
473 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
474 inlinable = false;
476 /* gcc.dg/pr43564.c. Apply user-forced inline even at -O0. */
477 else if (always_inline)
479 /* When user added an attribute to the callee honor it. */
480 else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
481 && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
483 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
484 inlinable = false;
486 /* If explicit optimize attributes are not used, the mismatch is caused
487 by different command line options used to build different units.
488 Do not care about COMDAT functions - those are intended to be
489 optimized with the optimization flags of the module they are used in.
490 Also do not care about mixing up size/speed optimization when
491 DECL_DISREGARD_INLINE_LIMITS is set. */
492 else if ((callee->merged
493 && !lookup_attribute ("optimize",
494 DECL_ATTRIBUTES (caller->decl)))
495 || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
497 /* If the mismatch is caused by merging two LTO units with different
498 optimization flags we want to be a bit nicer. However, never inline
499 if one of the functions is not optimized at all. */
500 else if (!opt_for_fn (callee->decl, optimize)
501 || !opt_for_fn (caller->decl, optimize))
503 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
504 inlinable = false;
506 /* If the callee is optimized for size and the caller is not, allow inlining if
507 code shrinks or we are within the MAX_INLINE_INSNS_SINGLE limit and the callee
508 is inline (and thus likely a unified comdat). This will allow the caller
509 to run faster. */
510 else if (opt_for_fn (callee->decl, optimize_size)
511 > opt_for_fn (caller->decl, optimize_size))
513 int growth = estimate_edge_growth (e);
514 if (growth > 0
515 && (!DECL_DECLARED_INLINE_P (callee->decl)
516 && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
517 MAX_INLINE_INSNS_AUTO)))
519 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
520 inlinable = false;
523 /* If the callee is more aggressively optimized for performance than the caller,
524 we generally want to inline only cheap (runtime-wise) functions. */
525 else if (opt_for_fn (callee->decl, optimize_size)
526 < opt_for_fn (caller->decl, optimize_size)
527 || (opt_for_fn (callee->decl, optimize)
528 > opt_for_fn (caller->decl, optimize)))
530 if (estimate_edge_time (e)
531 >= 20 + inline_edge_summary (e)->call_stmt_time)
533 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
534 inlinable = false;
540 if (!inlinable && report)
541 report_inline_failed_reason (e);
542 return inlinable;
546 /* Return true if the edge E is inlinable during early inlining. */
548 static bool
549 can_early_inline_edge_p (struct cgraph_edge *e)
551 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
552 /* The early inliner might get called at the WPA stage when an IPA pass adds
553 a new function. In this case we cannot really do any early inlining
554 because function bodies are missing. */
555 if (!gimple_has_body_p (callee->decl))
557 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
558 return false;
560 /* In the early inliner some of the callees may not be in SSA form yet
561 (i.e. the callgraph is cyclic and we have not processed
562 the callee with the early inliner yet). We don't have a CIF code for this
563 case; later we will redo the decision in the real inliner. */
564 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
565 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
567 if (dump_file)
568 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
569 return false;
571 if (!can_inline_edge_p (e, true, false, true))
572 return false;
573 return true;
577 /* Return number of calls in N. Ignore cheap builtins. */
579 static int
580 num_calls (struct cgraph_node *n)
582 struct cgraph_edge *e;
583 int num = 0;
585 for (e = n->callees; e; e = e->next_callee)
586 if (!is_inexpensive_builtin (e->callee->decl))
587 num++;
588 return num;
592 /* Return true if we are interested in inlining the small function. */
594 static bool
595 want_early_inline_function_p (struct cgraph_edge *e)
597 bool want_inline = true;
598 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
600 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
602 /* For AutoFDO, we need to make sure that before the profile summary, all
603 hot paths' IR look exactly the same as in the profiled binary. As a result,
604 in the einliner, we will disregard the size limit and inline those callsites
605 that are:
606 * inlined in the profiled binary, and
607 * the cloned callee has enough samples to be considered "hot". */
608 else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
610 else if (!DECL_DECLARED_INLINE_P (callee->decl)
611 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
613 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
614 report_inline_failed_reason (e);
615 want_inline = false;
617 else
619 int growth = estimate_edge_growth (e);
620 int n;
622 if (growth <= 0)
624 else if (!e->maybe_hot_p ()
625 && growth > 0)
627 if (dump_file)
628 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
629 "call is cold and code would grow by %i\n",
630 xstrdup_for_dump (e->caller->name ()),
631 e->caller->order,
632 xstrdup_for_dump (callee->name ()), callee->order,
633 growth);
634 want_inline = false;
636 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
638 if (dump_file)
639 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
640 "growth %i exceeds --param early-inlining-insns\n",
641 xstrdup_for_dump (e->caller->name ()),
642 e->caller->order,
643 xstrdup_for_dump (callee->name ()), callee->order,
644 growth);
645 want_inline = false;
647 else if ((n = num_calls (callee)) != 0
648 && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
650 if (dump_file)
651 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
652 "growth %i exceeds --param early-inlining-insns "
653 "divided by number of calls\n",
654 xstrdup_for_dump (e->caller->name ()),
655 e->caller->order,
656 xstrdup_for_dump (callee->name ()), callee->order,
657 growth);
658 want_inline = false;
661 return want_inline;
664 /* Compute time of the edge->caller + edge->callee execution when inlining
665 does not happen. */
667 inline sreal
668 compute_uninlined_call_time (struct inline_summary *callee_info,
669 struct cgraph_edge *edge)
671 sreal uninlined_call_time = (sreal)callee_info->time;
672 cgraph_node *caller = (edge->caller->global.inlined_to
673 ? edge->caller->global.inlined_to
674 : edge->caller);
676 if (edge->count && caller->count)
677 uninlined_call_time *= (sreal)edge->count / caller->count;
678 if (edge->frequency)
679 uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
680 else
681 uninlined_call_time = uninlined_call_time >> 11;
683 int caller_time = inline_summaries->get (caller)->time;
684 return uninlined_call_time + caller_time;
687 /* Same as compute_uninlined_call_time but compute the time when inlining
688 does happen. */
690 inline sreal
691 compute_inlined_call_time (struct cgraph_edge *edge,
692 int edge_time)
694 cgraph_node *caller = (edge->caller->global.inlined_to
695 ? edge->caller->global.inlined_to
696 : edge->caller);
697 int caller_time = inline_summaries->get (caller)->time;
698 sreal time = edge_time;
700 if (edge->count && caller->count)
701 time *= (sreal)edge->count / caller->count;
702 if (edge->frequency)
703 time *= cgraph_freq_base_rec * edge->frequency;
704 else
705 time = time >> 11;
707 /* This calculation should match one in ipa-inline-analysis.
708 FIXME: Once ipa-inline-analysis is converted to sreal this can be
709 simplified. */
710 time -= (sreal) ((gcov_type) edge->frequency
711 * inline_edge_summary (edge)->call_stmt_time
712 * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE)) / INLINE_TIME_SCALE;
713 time += caller_time;
714 if (time <= 0)
715 time = ((sreal) 1) >> 8;
716 gcc_checking_assert (time >= 0);
717 return time;
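/* A worked example with hypothetical numbers: for a callee whose estimated
   time is 1000 and an edge executed on half of the caller's invocations
   (frequency == CGRAPH_FREQ_BASE / 2) with no profile counts, the uninlined
   time above is 1000 * 0.5 plus the caller's own time; the inlined variant
   scales edge_time the same way and additionally subtracts the call overhead
   (call_stmt_time), again weighted by the edge frequency.  */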
720 /* Return true if the speedup for inlining E is bigger than
721 PARAM_INLINE_MIN_SPEEDUP. */
723 static bool
724 big_speedup_p (struct cgraph_edge *e)
726 sreal time = compute_uninlined_call_time (inline_summaries->get (e->callee), e);
728 sreal inlined_time = compute_inlined_call_time (e, estimate_edge_time (e));
730 if (time - inlined_time
731 > (sreal) time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)
732 * percent_rec)
733 return true;
734 return false;
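/* A worked example with hypothetical numbers: with --param
   inline-min-speedup=10, an uninlined caller+callee time of 600 and an
   estimated time of 520 after inlining, the saving of 80 exceeds
   600 * 10 / 100 = 60, so this qualifies as a big speedup; a saving of 50
   would not.  */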
737 /* Return true if we are interested in inlining the small function.
738 When REPORT is true, report the reason to the dump file. */
740 static bool
741 want_inline_small_function_p (struct cgraph_edge *e, bool report)
743 bool want_inline = true;
744 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
746 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
748 else if (!DECL_DECLARED_INLINE_P (callee->decl)
749 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
751 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
752 want_inline = false;
754 /* Do a fast and conservative check to see if the function can be a good
755 inline candidate. At the moment we allow inline hints to
756 promote non-inline functions to inline and we increase
757 MAX_INLINE_INSNS_SINGLE 16-fold for inline functions. */
758 else if ((!DECL_DECLARED_INLINE_P (callee->decl)
759 && (!e->count || !e->maybe_hot_p ()))
760 && inline_summaries->get (callee)->min_size
761 - inline_edge_summary (e)->call_stmt_size
762 > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
764 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
765 want_inline = false;
767 else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
768 && inline_summaries->get (callee)->min_size
769 - inline_edge_summary (e)->call_stmt_size
770 > 16 * MAX_INLINE_INSNS_SINGLE)
772 e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
773 ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
774 : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
775 want_inline = false;
777 else
779 int growth = estimate_edge_growth (e);
780 inline_hints hints = estimate_edge_hints (e);
781 bool big_speedup = big_speedup_p (e);
783 if (growth <= 0)
785 /* Apply the MAX_INLINE_INSNS_SINGLE limit. Do not do so when
786 hints suggest that inlining the given function is very profitable. */
787 else if (DECL_DECLARED_INLINE_P (callee->decl)
788 && growth >= MAX_INLINE_INSNS_SINGLE
789 && ((!big_speedup
790 && !(hints & (INLINE_HINT_indirect_call
791 | INLINE_HINT_known_hot
792 | INLINE_HINT_loop_iterations
793 | INLINE_HINT_array_index
794 | INLINE_HINT_loop_stride)))
795 || growth >= MAX_INLINE_INSNS_SINGLE * 16))
797 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
798 want_inline = false;
800 else if (!DECL_DECLARED_INLINE_P (callee->decl)
801 && !opt_for_fn (e->caller->decl, flag_inline_functions))
803 /* growth_likely_positive is expensive, always test it last. */
804 if (growth >= MAX_INLINE_INSNS_SINGLE
805 || growth_likely_positive (callee, growth))
807 e->inline_failed = CIF_NOT_DECLARED_INLINED;
808 want_inline = false;
811 /* Apply the MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
812 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
813 inlining the given function is very profitable. */
814 else if (!DECL_DECLARED_INLINE_P (callee->decl)
815 && !big_speedup
816 && !(hints & INLINE_HINT_known_hot)
817 && growth >= ((hints & (INLINE_HINT_indirect_call
818 | INLINE_HINT_loop_iterations
819 | INLINE_HINT_array_index
820 | INLINE_HINT_loop_stride))
821 ? MAX (MAX_INLINE_INSNS_AUTO,
822 MAX_INLINE_INSNS_SINGLE)
823 : MAX_INLINE_INSNS_AUTO))
825 /* growth_likely_positive is expensive, always test it last. */
826 if (growth >= MAX_INLINE_INSNS_SINGLE
827 || growth_likely_positive (callee, growth))
829 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
830 want_inline = false;
833 /* If call is cold, do not inline when function body would grow. */
834 else if (!e->maybe_hot_p ()
835 && (growth >= MAX_INLINE_INSNS_SINGLE
836 || growth_likely_positive (callee, growth)))
838 e->inline_failed = CIF_UNLIKELY_CALL;
839 want_inline = false;
842 if (!want_inline && report)
843 report_inline_failed_reason (e);
844 return want_inline;
847 /* EDGE is a self-recursive edge.
848 We handle two cases - when function A is inlined into itself
849 or when function A is being inlined into another inlined copy of function
850 A within function B.
852 In the first case OUTER_NODE points to the toplevel copy of A, while
853 in the second case OUTER_NODE points to the outermost copy of A in B.
855 In both cases we want to be extra selective since
856 inlining the call will just cause new recursive calls to appear. */
858 static bool
859 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
860 struct cgraph_node *outer_node,
861 bool peeling,
862 int depth)
864 char const *reason = NULL;
865 bool want_inline = true;
866 int caller_freq = CGRAPH_FREQ_BASE;
867 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
869 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
870 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
872 if (!edge->maybe_hot_p ())
874 reason = "recursive call is cold";
875 want_inline = false;
877 else if (max_count && !outer_node->count)
879 reason = "not executed in profile";
880 want_inline = false;
882 else if (depth > max_depth)
884 reason = "--param max-inline-recursive-depth exceeded.";
885 want_inline = false;
888 if (outer_node->global.inlined_to)
889 caller_freq = outer_node->callers->frequency;
891 if (!caller_freq)
893 reason = "function is inlined and unlikely";
894 want_inline = false;
897 if (!want_inline)
899 /* Inlining of a self-recursive function into a copy of itself within another
900 function is a transformation similar to loop peeling.
902 Peeling is profitable if we can inline enough copies to make the probability
903 of an actual call to the self-recursive function very small. Be sure that
904 the probability of recursion is small.
906 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
907 This way the expected number of recursions is at most max_depth. */
908 else if (peeling)
910 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
911 / max_depth);
912 int i;
913 for (i = 1; i < depth; i++)
914 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
915 if (max_count
916 && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
917 >= max_prob))
919 reason = "profile of recursive call is too large";
920 want_inline = false;
922 if (!max_count
923 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
924 >= max_prob))
926 reason = "frequency of recursive call is too large";
927 want_inline = false;
930 /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the
931 recursion depth is large. We reduce function call overhead and increase the
932 chances that things fit in the hardware return predictor.
934 Recursive inlining might however increase the cost of stack frame setup,
935 actually slowing down functions whose recursion tree is wide rather than
936 deep.
938 Deciding reliably on when to do recursive inlining without profile feedback
939 is tricky. For now we disable recursive inlining when the probability of self
940 recursion is low.
942 Recursive inlining of a self-recursive call within a loop also results in large
943 loop depths that generally optimize badly. We may want to throttle down inlining
944 in those cases. In particular this seems to happen in one of the libstdc++ rb
945 tree methods. */
946 else
948 if (max_count
949 && (edge->count * 100 / outer_node->count
950 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
952 reason = "profile of recursive call is too small";
953 want_inline = false;
955 else if (!max_count
956 && (edge->frequency * 100 / caller_freq
957 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
959 reason = "frequency of recursive call is too small";
960 want_inline = false;
963 if (!want_inline && dump_file)
964 fprintf (dump_file, " not inlining recursively: %s\n", reason);
965 return want_inline;
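/* A worked example of the peeling bound above (hypothetical numbers): with
   --param max-inline-recursive-depth-auto=8, max_prob starts at roughly 7/8
   of CGRAPH_FREQ_BASE; at depth 2 it is squared to about (7/8)^2, and so on,
   so each extra level of peeling is accepted only when the call is
   correspondingly less likely to recurse further.  */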
968 /* Return true when NODE has an uninlinable caller;
969 set HAS_HOT_CALL if it has a hot call.
970 Worker for cgraph_for_node_and_aliases. */
972 static bool
973 check_callers (struct cgraph_node *node, void *has_hot_call)
975 struct cgraph_edge *e;
976 for (e = node->callers; e; e = e->next_caller)
978 if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
979 return true;
980 if (!can_inline_edge_p (e, true))
981 return true;
982 if (e->recursive_p ())
983 return true;
984 if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
985 *(bool *)has_hot_call = true;
987 return false;
990 /* If NODE has a caller, return true. */
992 static bool
993 has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
995 if (node->callers)
996 return true;
997 return false;
1000 /* Decide if inlining NODE would reduce unit size by eliminating
1001 the offline copy of function.
1002 When COLD is true the cold calls are considered, too. */
1004 static bool
1005 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
1007 bool has_hot_call = false;
1009 /* Aliases get inlined along with the function they alias. */
1010 if (node->alias)
1011 return false;
1012 /* Already inlined? */
1013 if (node->global.inlined_to)
1014 return false;
1015 /* Does it have callers? */
1016 if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
1017 return false;
1018 /* Inlining into all callers would increase size? */
1019 if (estimate_growth (node) > 0)
1020 return false;
1021 /* All inlines must be possible. */
1022 if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
1023 true))
1024 return false;
1025 if (!cold && !has_hot_call)
1026 return false;
1027 return true;
1030 /* A cost model driving the inlining heuristics in a way such that the edges
1031 with the smallest badness are inlined first. After each inlining is performed
1032 the costs of all caller edges of the nodes affected are recomputed so the
1033 metrics may accurately depend on values such as the number of inlinable callers
1034 of the function or the function body size. */
1036 static sreal
1037 edge_badness (struct cgraph_edge *edge, bool dump)
1039 sreal badness;
1040 int growth, edge_time;
1041 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1042 struct inline_summary *callee_info = inline_summaries->get (callee);
1043 inline_hints hints;
1044 cgraph_node *caller = (edge->caller->global.inlined_to
1045 ? edge->caller->global.inlined_to
1046 : edge->caller);
1048 growth = estimate_edge_growth (edge);
1049 edge_time = estimate_edge_time (edge);
1050 hints = estimate_edge_hints (edge);
1051 gcc_checking_assert (edge_time >= 0);
1052 gcc_checking_assert (edge_time <= callee_info->time);
1053 gcc_checking_assert (growth <= callee_info->size);
1055 if (dump)
1057 fprintf (dump_file, " Badness calculation for %s/%i -> %s/%i\n",
1058 xstrdup_for_dump (edge->caller->name ()),
1059 edge->caller->order,
1060 xstrdup_for_dump (callee->name ()),
1061 edge->callee->order);
1062 fprintf (dump_file, " size growth %i, time %i ",
1063 growth,
1064 edge_time);
1065 dump_inline_hints (dump_file, hints);
1066 if (big_speedup_p (edge))
1067 fprintf (dump_file, " big_speedup");
1068 fprintf (dump_file, "\n");
1071 /* Always prefer inlining saving code size. */
1072 if (growth <= 0)
1074 badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
1075 if (dump)
1076 fprintf (dump_file, " %f: Growth %d <= 0\n", badness.to_double (),
1077 growth);
1079 /* Inlining into EXTERNAL functions is not going to change anything unless
1080 they are themselves inlined. */
1081 else if (DECL_EXTERNAL (caller->decl))
1083 if (dump)
1084 fprintf (dump_file, " max: function is external\n");
1085 return sreal::max ();
1087 /* When the profile is available, compute badness as:
1089 time_saved * caller_count
1090 goodness = -------------------------------------------------
1091 growth_of_caller * overall_growth * combined_size
1093 badness = - goodness
1095 Again use a negative value to make calls with a profile appear hotter
1096 than calls without. */
1098 else if (opt_for_fn (caller->decl, flag_guess_branch_prob) || caller->count)
1100 sreal numerator, denominator;
1101 int overall_growth;
1103 numerator = (compute_uninlined_call_time (callee_info, edge)
1104 - compute_inlined_call_time (edge, edge_time));
1105 if (numerator == 0)
1106 numerator = ((sreal) 1 >> 8);
1107 if (caller->count)
1108 numerator *= caller->count;
1109 else if (opt_for_fn (caller->decl, flag_branch_probabilities))
1110 numerator = numerator >> 11;
1111 denominator = growth;
1113 overall_growth = callee_info->growth;
1115 /* Look for inliner wrappers of the form:
1117 inline_caller ()
1119 do_fast_job...
1120 if (need_more_work)
1121 noninline_callee ();
1123 Without penalizing this case, we usually inline noninline_callee
1124 into the inline_caller because overall_growth is small, preventing
1125 further inlining of inline_caller.
1127 Penalize only callgraph edges to functions with small overall
1128 growth ... */
1130 if (growth > overall_growth
1131 /* ... and having only one caller which is not inlined ... */
1132 && callee_info->single_caller
1133 && !edge->caller->global.inlined_to
1134 /* ... and edges executed only conditionally ... */
1135 && edge->frequency < CGRAPH_FREQ_BASE
1136 /* ... consider case where callee is not inline but caller is ... */
1137 && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
1138 && DECL_DECLARED_INLINE_P (caller->decl))
1139 /* ... or when early optimizers decided to split and edge
1140 frequency still indicates splitting is a win ... */
1141 || (callee->split_part && !caller->split_part
1142 && edge->frequency
1143 < CGRAPH_FREQ_BASE
1144 * PARAM_VALUE
1145 (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100
1146 /* ... and do not overwrite user specified hints. */
1147 && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
1148 || DECL_DECLARED_INLINE_P (caller->decl)))))
1150 struct inline_summary *caller_info = inline_summaries->get (caller);
1151 int caller_growth = caller_info->growth;
1153 /* Only apply the penalty when the caller looks like an inline candidate,
1154 and it is not called once. */
1155 if (!caller_info->single_caller && overall_growth < caller_growth
1156 && caller_info->inlinable
1157 && caller_info->size
1158 < (DECL_DECLARED_INLINE_P (caller->decl)
1159 ? MAX_INLINE_INSNS_SINGLE : MAX_INLINE_INSNS_AUTO))
1161 if (dump)
1162 fprintf (dump_file,
1163 " Wrapper penalty. Increasing growth %i to %i\n",
1164 overall_growth, caller_growth);
1165 overall_growth = caller_growth;
1168 if (overall_growth > 0)
1170 /* Strongly prefer functions with few callers that can be inlined
1171 fully. Squaring the growth here leads to smaller binaries on average.
1172 Watch however for extreme cases and return to a linear function
1173 when growth is large. */
1174 if (overall_growth < 256)
1175 overall_growth *= overall_growth;
1176 else
1177 overall_growth += 256 * 256 - 256;
1178 denominator *= overall_growth;
1180 denominator *= inline_summaries->get (caller)->self_size + growth;
1182 badness = - numerator / denominator;
1184 if (dump)
1186 fprintf (dump_file,
1187 " %f: guessed profile. frequency %f, count %" PRId64
1188 " caller count %" PRId64
1189 " time w/o inlining %f, time w inlining %f"
1190 " overall growth %i (current) %i (original)"
1191 " %i (compensated)\n",
1192 badness.to_double (),
1193 (double)edge->frequency / CGRAPH_FREQ_BASE,
1194 edge->count, caller->count,
1195 compute_uninlined_call_time (callee_info, edge).to_double (),
1196 compute_inlined_call_time (edge, edge_time).to_double (),
1197 estimate_growth (callee),
1198 callee_info->growth, overall_growth);
1201 /* When the function-local profile is not available or it does not give
1202 useful information (i.e. frequency is zero), base the cost on the
1203 loop nest and overall size growth, so we optimize for the overall number
1204 of functions fully inlined in the program. */
1205 else
1207 int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
1208 badness = growth;
1210 /* Decrease badness if call is nested. */
1211 if (badness > 0)
1212 badness = badness >> nest;
1213 else
1214 badness = badness << nest;
1215 if (dump)
1216 fprintf (dump_file, " %f: no profile. nest %i\n",
1217 badness.to_double (), nest);
1219 gcc_checking_assert (badness != 0);
1221 if (edge->recursive_p ())
1222 badness = badness.shift (badness > 0 ? 4 : -4);
1223 if ((hints & (INLINE_HINT_indirect_call
1224 | INLINE_HINT_loop_iterations
1225 | INLINE_HINT_array_index
1226 | INLINE_HINT_loop_stride))
1227 || callee_info->growth <= 0)
1228 badness = badness.shift (badness > 0 ? -2 : 2);
1229 if (hints & (INLINE_HINT_same_scc))
1230 badness = badness.shift (badness > 0 ? 3 : -3);
1231 else if (hints & (INLINE_HINT_in_scc))
1232 badness = badness.shift (badness > 0 ? 2 : -2);
1233 else if (hints & (INLINE_HINT_cross_module))
1234 badness = badness.shift (badness > 0 ? 1 : -1);
1235 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1236 badness = badness.shift (badness > 0 ? -4 : 4);
1237 else if ((hints & INLINE_HINT_declared_inline))
1238 badness = badness.shift (badness > 0 ? -3 : 3);
1239 if (dump)
1240 fprintf (dump_file, " Adjusted by hints %f\n", badness.to_double ());
1241 return badness;
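/* A worked example of the guessed-profile branch above (hypothetical
   numbers): for an edge with growth 10, a time saving of 2, overall_growth
   20 (squared to 400 since it is below 256) and a caller self_size of 90,
   the denominator is 10 * 400 * (90 + 10) = 400000 and the badness is about
   -2 / 400000.  Since the heap is a min-heap, more strongly negative keys
   (edges saving more time per unit of growth) are inlined first.  */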
1244 /* Recompute badness of EDGE and update its key in HEAP if needed. */
1245 static inline void
1246 update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
1248 sreal badness = edge_badness (edge, false);
1249 if (edge->aux)
1251 edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
1252 gcc_checking_assert (n->get_data () == edge);
1254 /* fibonacci_heap::replace_key does busy updating of the
1255 heap that is unnecessarily expensive.
1256 We do lazy increases: after extracting the minimum, if the key
1257 turns out to be out of date, it is re-inserted into the heap
1258 with the correct value. */
1259 if (badness < n->get_key ())
1261 if (dump_file && (dump_flags & TDF_DETAILS))
1263 fprintf (dump_file,
1264 " decreasing badness %s/%i -> %s/%i, %f"
1265 " to %f\n",
1266 xstrdup_for_dump (edge->caller->name ()),
1267 edge->caller->order,
1268 xstrdup_for_dump (edge->callee->name ()),
1269 edge->callee->order,
1270 n->get_key ().to_double (),
1271 badness.to_double ());
1273 heap->decrease_key (n, badness);
1276 else
1278 if (dump_file && (dump_flags & TDF_DETAILS))
1280 fprintf (dump_file,
1281 " enqueuing call %s/%i -> %s/%i, badness %f\n",
1282 xstrdup_for_dump (edge->caller->name ()),
1283 edge->caller->order,
1284 xstrdup_for_dump (edge->callee->name ()),
1285 edge->callee->order,
1286 badness.to_double ());
1288 edge->aux = heap->insert (badness, edge);
1293 /* NODE was inlined.
1294 All caller edges need to be reset because
1295 size estimates change. Similarly callees need a reset
1296 because a better context may be known. */
1298 static void
1299 reset_edge_caches (struct cgraph_node *node)
1301 struct cgraph_edge *edge;
1302 struct cgraph_edge *e = node->callees;
1303 struct cgraph_node *where = node;
1304 struct ipa_ref *ref;
1306 if (where->global.inlined_to)
1307 where = where->global.inlined_to;
1309 for (edge = where->callers; edge; edge = edge->next_caller)
1310 if (edge->inline_failed)
1311 reset_edge_growth_cache (edge);
1313 FOR_EACH_ALIAS (where, ref)
1314 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
1316 if (!e)
1317 return;
1319 while (true)
1320 if (!e->inline_failed && e->callee->callees)
1321 e = e->callee->callees;
1322 else
1324 if (e->inline_failed)
1325 reset_edge_growth_cache (e);
1326 if (e->next_callee)
1327 e = e->next_callee;
1328 else
1332 if (e->caller == node)
1333 return;
1334 e = e->caller->callers;
1336 while (!e->next_callee);
1337 e = e->next_callee;
1342 /* Recompute HEAP nodes for each caller of NODE.
1343 UPDATED_NODES tracks nodes we have already visited, to avoid redundant work.
1344 When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
1345 it is inlinable. Otherwise check all edges. */
1347 static void
1348 update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
1349 bitmap updated_nodes,
1350 struct cgraph_edge *check_inlinablity_for)
1352 struct cgraph_edge *edge;
1353 struct ipa_ref *ref;
1355 if ((!node->alias && !inline_summaries->get (node)->inlinable)
1356 || node->global.inlined_to)
1357 return;
1358 if (!bitmap_set_bit (updated_nodes, node->uid))
1359 return;
1361 FOR_EACH_ALIAS (node, ref)
1363 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1364 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1367 for (edge = node->callers; edge; edge = edge->next_caller)
1368 if (edge->inline_failed)
1370 if (!check_inlinablity_for
1371 || check_inlinablity_for == edge)
1373 if (can_inline_edge_p (edge, false)
1374 && want_inline_small_function_p (edge, false))
1375 update_edge_key (heap, edge);
1376 else if (edge->aux)
1378 report_inline_failed_reason (edge);
1379 heap->delete_node ((edge_heap_node_t *) edge->aux);
1380 edge->aux = NULL;
1383 else if (edge->aux)
1384 update_edge_key (heap, edge);
1388 /* Recompute HEAP nodes for each uninlined call in NODE.
1389 This is used when we know that edge badnesses are only going to increase
1390 (we introduced a new call site) and thus all we need to do is insert the newly
1391 created edges into the heap. */
1393 static void
1394 update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
1395 bitmap updated_nodes)
1397 struct cgraph_edge *e = node->callees;
1399 if (!e)
1400 return;
1401 while (true)
1402 if (!e->inline_failed && e->callee->callees)
1403 e = e->callee->callees;
1404 else
1406 enum availability avail;
1407 struct cgraph_node *callee;
1408 /* We do not reset the callee growth cache here. Since we added a new call,
1409 growth could have only increased and consequently the badness metric
1410 doesn't need updating. */
1411 if (e->inline_failed
1412 && (callee = e->callee->ultimate_alias_target (&avail))
1413 && inline_summaries->get (callee)->inlinable
1414 && avail >= AVAIL_AVAILABLE
1415 && !bitmap_bit_p (updated_nodes, callee->uid))
1417 if (can_inline_edge_p (e, false)
1418 && want_inline_small_function_p (e, false))
1419 update_edge_key (heap, e);
1420 else if (e->aux)
1422 report_inline_failed_reason (e);
1423 heap->delete_node ((edge_heap_node_t *) e->aux);
1424 e->aux = NULL;
1427 if (e->next_callee)
1428 e = e->next_callee;
1429 else
1433 if (e->caller == node)
1434 return;
1435 e = e->caller->callers;
1437 while (!e->next_callee);
1438 e = e->next_callee;
1443 /* Enqueue all recursive calls from NODE into priority queue depending on
1444 how likely we want to recursively inline the call. */
1446 static void
1447 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1448 edge_heap_t *heap)
1450 struct cgraph_edge *e;
1451 enum availability avail;
1453 for (e = where->callees; e; e = e->next_callee)
1454 if (e->callee == node
1455 || (e->callee->ultimate_alias_target (&avail) == node
1456 && avail > AVAIL_INTERPOSABLE))
1458 /* When profile feedback is available, prioritize by expected number
1459 of calls. */
1460 heap->insert (!max_count ? -e->frequency
1461 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))), e);
1464 for (e = where->callees; e; e = e->next_callee)
1465 if (!e->inline_failed)
1466 lookup_recursive_calls (node, e->callee, heap);
1469 /* Decide on recursive inlining: in the case the function has recursive calls,
1470 inline until the body size reaches the given argument. If any new indirect edges
1471 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1472 is NULL. */
1474 static bool
1475 recursive_inlining (struct cgraph_edge *edge,
1476 vec<cgraph_edge *> *new_edges)
1478 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1479 edge_heap_t heap (sreal::min ());
1480 struct cgraph_node *node;
1481 struct cgraph_edge *e;
1482 struct cgraph_node *master_clone = NULL, *next;
1483 int depth = 0;
1484 int n = 0;
1486 node = edge->caller;
1487 if (node->global.inlined_to)
1488 node = node->global.inlined_to;
1490 if (DECL_DECLARED_INLINE_P (node->decl))
1491 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1493 /* Make sure that function is small enough to be considered for inlining. */
1494 if (estimate_size_after_inlining (node, edge) >= limit)
1495 return false;
1496 lookup_recursive_calls (node, node, &heap);
1497 if (heap.empty ())
1498 return false;
1500 if (dump_file)
1501 fprintf (dump_file,
1502 " Performing recursive inlining on %s\n",
1503 node->name ());
1505 /* Do the inlining and update list of recursive call during process. */
1506 while (!heap.empty ())
1508 struct cgraph_edge *curr = heap.extract_min ();
1509 struct cgraph_node *cnode, *dest = curr->callee;
1511 if (!can_inline_edge_p (curr, true))
1512 continue;
1514 /* MASTER_CLONE is produced in the case we have already started modifying
1515 the function. Be sure to redirect the edge to the original body before
1516 estimating growth, otherwise we will be seeing growth after inlining
1517 into the already modified body. */
1518 if (master_clone)
1520 curr->redirect_callee (master_clone);
1521 reset_edge_growth_cache (curr);
1524 if (estimate_size_after_inlining (node, curr) > limit)
1526 curr->redirect_callee (dest);
1527 reset_edge_growth_cache (curr);
1528 break;
1531 depth = 1;
1532 for (cnode = curr->caller;
1533 cnode->global.inlined_to; cnode = cnode->callers->caller)
1534 if (node->decl
1535 == curr->callee->ultimate_alias_target ()->decl)
1536 depth++;
1538 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1540 curr->redirect_callee (dest);
1541 reset_edge_growth_cache (curr);
1542 continue;
1545 if (dump_file)
1547 fprintf (dump_file,
1548 " Inlining call of depth %i", depth);
1549 if (node->count)
1551 fprintf (dump_file, " called approx. %.2f times per call",
1552 (double)curr->count / node->count);
1554 fprintf (dump_file, "\n");
1556 if (!master_clone)
1558 /* We need original clone to copy around. */
1559 master_clone = node->create_clone (node->decl, node->count,
1560 CGRAPH_FREQ_BASE, false, vNULL,
1561 true, NULL, NULL);
1562 for (e = master_clone->callees; e; e = e->next_callee)
1563 if (!e->inline_failed)
1564 clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
1565 curr->redirect_callee (master_clone);
1566 reset_edge_growth_cache (curr);
1569 inline_call (curr, false, new_edges, &overall_size, true);
1570 lookup_recursive_calls (node, curr->callee, &heap);
1571 n++;
1574 if (!heap.empty () && dump_file)
1575 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1577 if (!master_clone)
1578 return false;
1580 if (dump_file)
1581 fprintf (dump_file,
1582 "\n Inlined %i times, "
1583 "body grown from size %i to %i, time %i to %i\n", n,
1584 inline_summaries->get (master_clone)->size, inline_summaries->get (node)->size,
1585 inline_summaries->get (master_clone)->time, inline_summaries->get (node)->time);
1587 /* Remove the master clone we used for inlining. We rely on the fact that
1588 clones inlined into the master clone get queued just before the master clone
1589 so we don't need recursion. */
1590 for (node = symtab->first_function (); node != master_clone;
1591 node = next)
1593 next = symtab->next_function (node);
1594 if (node->global.inlined_to == master_clone)
1595 node->remove ();
1597 master_clone->remove ();
1598 return true;
1602 /* Given whole compilation unit estimate of INSNS, compute how large we can
1603 allow the unit to grow. */
1605 static int
1606 compute_max_insns (int insns)
1608 int max_insns = insns;
1609 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1610 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1612 return ((int64_t) max_insns
1613 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
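/* A worked example with hypothetical numbers: for a unit of 50000 insns and
   --param inline-unit-growth=20, the function above allows the unit to grow
   to 50000 * (100 + 20) / 100 = 60000 insns; units smaller than
   --param large-unit-insns are first rounded up to that value before the
   percentage is applied.  */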
1617 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1619 static void
1620 add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
1622 while (new_edges.length () > 0)
1624 struct cgraph_edge *edge = new_edges.pop ();
1626 gcc_assert (!edge->aux);
1627 if (edge->inline_failed
1628 && can_inline_edge_p (edge, true)
1629 && want_inline_small_function_p (edge, true))
1630 edge->aux = heap->insert (edge_badness (edge, false), edge);
1634 /* Remove EDGE from the fibheap. */
1636 static void
1637 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1639 if (e->aux)
1641 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
1642 e->aux = NULL;
1646 /* Return true if speculation of edge E seems useful.
1647 If ANTICIPATE_INLINING is true, be conservative and hope that E
1648 may get inlined. */
1650 bool
1651 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1653 enum availability avail;
1654 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail);
1655 struct cgraph_edge *direct, *indirect;
1656 struct ipa_ref *ref;
1658 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1660 if (!e->maybe_hot_p ())
1661 return false;
1663 /* See if IP optimizations found something potentially useful about the
1664 function. For now we look only for CONST/PURE flags. Almost everything
1665 else we propagate is useless. */
1666 if (avail >= AVAIL_AVAILABLE)
1668 int ecf_flags = flags_from_decl_or_type (target->decl);
1669 if (ecf_flags & ECF_CONST)
1671 e->speculative_call_info (direct, indirect, ref);
1672 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1673 return true;
1675 else if (ecf_flags & ECF_PURE)
1677 e->speculative_call_info (direct, indirect, ref);
1678 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1679 return true;
1682 /* If we did not manage to inline the function or redirect it
1683 to an ipa-cp clone (these are seen by having the local flag set),
1684 it is probably pointless to inline it unless the hardware is missing an
1685 indirect call predictor. */
1686 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1687 return false;
1688 /* For overwritable targets there is not much to do. */
1689 if (e->inline_failed && !can_inline_edge_p (e, false, true))
1690 return false;
1691 /* OK, speculation seems interesting. */
1692 return true;
1695 /* We know that EDGE is not going to be inlined.
1696 See if we can remove speculation. */
1698 static void
1699 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1701 if (edge->speculative && !speculation_useful_p (edge, false))
1703 struct cgraph_node *node = edge->caller;
1704 struct cgraph_node *where = node->global.inlined_to
1705 ? node->global.inlined_to : node;
1706 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1708 spec_rem += edge->count;
1709 edge->resolve_speculation ();
1710 reset_edge_caches (where);
1711 inline_update_overall_summary (where);
1712 update_caller_keys (edge_heap, where,
1713 updated_nodes, NULL);
1714 update_callee_keys (edge_heap, where,
1715 updated_nodes);
1716 BITMAP_FREE (updated_nodes);
1720 /* Return true if NODE should be accounted for in the overall size estimate.
1721 Skip all nodes optimized for size so we can measure the growth of the hot
1722 part of the program no matter the padding. */
1724 bool
1725 inline_account_function_p (struct cgraph_node *node)
1727 return (!DECL_EXTERNAL (node->decl)
1728 && !opt_for_fn (node->decl, optimize_size)
1729 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
1732 /* Count the number of callers of NODE and store it into DATA (which
1733 points to an int). Worker for cgraph_for_node_and_aliases. */
1735 static bool
1736 sum_callers (struct cgraph_node *node, void *data)
1738 struct cgraph_edge *e;
1739 int *num_calls = (int *)data;
1741 for (e = node->callers; e; e = e->next_caller)
1742 (*num_calls)++;
1743 return false;
1746 /* We use a greedy algorithm for inlining small functions:
1747 all inline candidates are put into a prioritized heap ordered by
1748 increasing badness.
1750 The inlining of small functions is bounded by the unit growth parameters. */
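/* Sketch of the algorithm below (a descriptive summary of the code that follows):
   1) seed the heap with every call edge that can and should be inlined;
   2) repeatedly extract the edge with the smallest badness, re-checking
      that its cached key is still up to date and re-inserting it if not;
   3) inline it, or hand recursive edges over to recursive_inlining;
   4) update the keys of callers and callees whose badness may have changed;
   5) stop when the heap is empty or the unit growth limit is exceeded.  */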
1752 static void
1753 inline_small_functions (void)
1755 struct cgraph_node *node;
1756 struct cgraph_edge *edge;
1757 edge_heap_t edge_heap (sreal::min ());
1758 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1759 int min_size, max_size;
1760 auto_vec<cgraph_edge *> new_indirect_edges;
1761 int initial_size = 0;
1762 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1763 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1764 new_indirect_edges.create (8);
1766 edge_removal_hook_holder
1767 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1769 /* Compute overall unit size and other global parameters used by badness
1770 metrics. */
1772 max_count = 0;
1773 ipa_reduced_postorder (order, true, true, NULL);
1774 free (order);
1776 FOR_EACH_DEFINED_FUNCTION (node)
1777 if (!node->global.inlined_to)
1779 if (!node->alias && node->analyzed
1780 && (node->has_gimple_body_p () || node->thunk.thunk_p))
1782 struct inline_summary *info = inline_summaries->get (node);
1783 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1785 /* Do not account external functions; they will be optimized out
1786 if not inlined. Also only count the non-cold portion of the program. */
1787 if (inline_account_function_p (node))
1788 initial_size += info->size;
1789 info->growth = estimate_growth (node);
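/* Count the callers of NODE, including calls through its aliases, and
   remember whether there is exactly one.  */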
1791 int num_calls = 0;
1792 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
1793 true);
1794 if (num_calls == 1)
1795 info->single_caller = true;
1796 if (dfs && dfs->next_cycle)
1798 struct cgraph_node *n2;
1799 int id = dfs->scc_no + 1;
1800 for (n2 = node; n2;
1801 n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
1803 struct inline_summary *info2 = inline_summaries->get (n2);
1804 if (info2->scc_no)
1805 break;
1806 info2->scc_no = id;
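/* Remember the largest profile count seen on any caller edge; a nonzero
   max_count indicates that profile information is in use.  */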
1811 for (edge = node->callers; edge; edge = edge->next_caller)
1812 if (max_count < edge->count)
1813 max_count = edge->count;
1815 ipa_free_postorder_info ();
1816 initialize_growth_caches ();
1818 if (dump_file)
1819 fprintf (dump_file,
1820 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1821 initial_size);
1823 overall_size = initial_size;
1824 max_size = compute_max_insns (overall_size);
1825 min_size = overall_size;
1827 /* Populate the heap with all edges we might inline. */
1829 FOR_EACH_DEFINED_FUNCTION (node)
1831 bool update = false;
1832 struct cgraph_edge *next = NULL;
1833 bool has_speculative = false;
1835 if (dump_file)
1836 fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
1837 node->name (), node->order);
1839 for (edge = node->callees; edge; edge = next)
1841 next = edge->next_callee;
1842 if (edge->inline_failed
1843 && !edge->aux
1844 && can_inline_edge_p (edge, true)
1845 && want_inline_small_function_p (edge, true)
1846 && edge->inline_failed)
1848 gcc_assert (!edge->aux);
1849 update_edge_key (&edge_heap, edge);
1851 if (edge->speculative)
1852 has_speculative = true;
1854 if (has_speculative)
1855 for (edge = node->callees; edge; edge = next)
1856 if (edge->speculative && !speculation_useful_p (edge,
1857 edge->aux != NULL))
1859 edge->resolve_speculation ();
1860 update = true;
1862 if (update)
1864 struct cgraph_node *where = node->global.inlined_to
1865 ? node->global.inlined_to : node;
1866 inline_update_overall_summary (where);
1867 reset_edge_caches (where);
1868 update_caller_keys (&edge_heap, where,
1869 updated_nodes, NULL);
1870 update_callee_keys (&edge_heap, where,
1871 updated_nodes);
1872 bitmap_clear (updated_nodes);
1876 gcc_assert (in_lto_p
1877 || !max_count
1878 || (profile_info && flag_branch_probabilities));
1880 while (!edge_heap.empty ())
1882 int old_size = overall_size;
1883 struct cgraph_node *where, *callee;
1884 sreal badness = edge_heap.min_key ();
1885 sreal current_badness;
1886 int growth;
1888 edge = edge_heap.extract_min ();
1889 gcc_assert (edge->aux);
1890 edge->aux = NULL;
1891 if (!edge->inline_failed || !edge->callee->analyzed)
1892 continue;
1894 #ifdef ENABLE_CHECKING
1895 /* Be sure that the caches are kept consistent. */
1896 sreal cached_badness = edge_badness (edge, false);
1898 int old_size_est = estimate_edge_size (edge);
1899 int old_time_est = estimate_edge_time (edge);
1900 int old_hints_est = estimate_edge_hints (edge);
1902 reset_edge_growth_cache (edge);
1903 gcc_assert (old_size_est == estimate_edge_size (edge));
1904 gcc_assert (old_time_est == estimate_edge_time (edge));
1905 /* FIXME:
1907 gcc_assert (old_hints_est == estimate_edge_hints (edge));
1909 fails with profile feedback because some hints depend on the
1910 maybe_hot_edge_p predicate and, because the callee gets inlined into
1911 other calls, the edge may become cold.
1912 This ought to be fixed by computing relative probabilities for the
1913 given invocation, but that is better done once the whole code is
1914 converted to sreals. Disable for now and revert to the "wrong"
1915 value so the enabled and disabled checking paths agree. */
1916 edge_growth_cache[edge->uid].hints = old_hints_est + 1;
1918 /* When updating the edge costs, we only decrease badness in the keys.
1919 Increases of badness are handled lazily; when we see a key with an
1920 out-of-date value on it, we re-insert it now. */
1921 current_badness = edge_badness (edge, false);
1922 /* Disable checking when profile feedback is used because roundoff errors
1923 may cause slight deviations in the order. */
1924 gcc_assert (max_count || cached_badness == current_badness);
1925 gcc_assert (current_badness >= badness);
1926 #else
1927 current_badness = edge_badness (edge, false);
1928 #endif
1929 if (current_badness != badness)
1931 if (edge_heap.min () && current_badness > edge_heap.min_key ())
1933 edge->aux = edge_heap.insert (current_badness, edge);
1934 continue;
1936 else
1937 badness = current_badness;
1940 if (!can_inline_edge_p (edge, true))
1942 resolve_noninline_speculation (&edge_heap, edge);
1943 continue;
1946 callee = edge->callee->ultimate_alias_target ();
1947 growth = estimate_edge_growth (edge);
1948 if (dump_file)
1950 fprintf (dump_file,
1951 "\nConsidering %s/%i with %i size\n",
1952 callee->name (), callee->order,
1953 inline_summaries->get (callee)->size);
1954 fprintf (dump_file,
1955 " to be inlined into %s/%i in %s:%i\n"
1956 " Estimated badness is %f, frequency %.2f.\n",
1957 edge->caller->name (), edge->caller->order,
1958 edge->call_stmt
1959 && (LOCATION_LOCUS (gimple_location ((const_gimple)
1960 edge->call_stmt))
1961 > BUILTINS_LOCATION)
1962 ? gimple_filename ((const_gimple) edge->call_stmt)
1963 : "unknown",
1964 edge->call_stmt
1965 ? gimple_lineno ((const_gimple) edge->call_stmt)
1966 : -1,
1967 badness.to_double (),
1968 edge->frequency / (double)CGRAPH_FREQ_BASE);
1969 if (edge->count)
1970 fprintf (dump_file," Called %" PRId64"x\n",
1971 edge->count);
1972 if (dump_flags & TDF_DETAILS)
1973 edge_badness (edge, true);
1976 if (overall_size + growth > max_size
1977 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1979 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1980 report_inline_failed_reason (edge);
1981 resolve_noninline_speculation (&edge_heap, edge);
1982 continue;
1985 if (!want_inline_small_function_p (edge, true))
1987 resolve_noninline_speculation (&edge_heap, edge);
1988 continue;
1991 /* Heuristics for inlining small functions work poorly for
1992 recursive calls, where we achieve effects similar to loop unrolling.
1993 When inlining such an edge seems profitable, leave the decision to
1994 the special-purpose recursive inliner. */
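/* For example, inlining a self-call of a recursive function N levels deep
   behaves much like peeling or unrolling the recursion N times, so it is
   handled by recursive_inlining with its own dedicated limits.  */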
1995 if (edge->recursive_p ())
1997 where = edge->caller;
1998 if (where->global.inlined_to)
1999 where = where->global.inlined_to;
2000 if (!recursive_inlining (edge,
2001 opt_for_fn (edge->caller->decl,
2002 flag_indirect_inlining)
2003 ? &new_indirect_edges : NULL))
2005 edge->inline_failed = CIF_RECURSIVE_INLINING;
2006 resolve_noninline_speculation (&edge_heap, edge);
2007 continue;
2009 reset_edge_caches (where);
2010 /* The recursive inliner inlines all recursive calls of the function
2011 at once. Consequently, we need to update all callee keys. */
2012 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
2013 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2014 update_callee_keys (&edge_heap, where, updated_nodes);
2015 bitmap_clear (updated_nodes);
2017 else
2019 struct cgraph_node *outer_node = NULL;
2020 int depth = 0;
2022 /* Consider the case where a self-recursive function A is inlined
2023 into B. This is a desired optimization in some cases, since it
2024 leads to an effect similar to loop peeling and we might completely
2025 optimize out the recursive call. However, we must be extra
2026 selective. */
2028 where = edge->caller;
2029 while (where->global.inlined_to)
2031 if (where->decl == callee->decl)
2032 outer_node = where, depth++;
2033 where = where->callers->caller;
2035 if (outer_node
2036 && !want_inline_self_recursive_call_p (edge, outer_node,
2037 true, depth))
2039 edge->inline_failed
2040 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
2041 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
2042 resolve_noninline_speculation (&edge_heap, edge);
2043 continue;
2045 else if (depth && dump_file)
2046 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
2048 gcc_checking_assert (!callee->global.inlined_to);
2049 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
2050 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2052 reset_edge_caches (edge->callee->function_symbol ());
2054 update_callee_keys (&edge_heap, where, updated_nodes);
2056 where = edge->caller;
2057 if (where->global.inlined_to)
2058 where = where->global.inlined_to;
2060 /* Our profitability metric can depend on local properties
2061 such as the number of inlinable calls and the size of the function body.
2062 After inlining, these properties might change for the function we
2063 inlined into (since its body size changed) and for the functions
2064 called by the function we inlined (since the number of its inlinable
2065 callers might change). */
2066 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
2067 /* The offline copy count has possibly changed; recompute if a profile is
2068 available. */
2069 if (max_count)
2071 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
2072 if (n != edge->callee && n->analyzed)
2073 update_callee_keys (&edge_heap, n, updated_nodes);
2075 bitmap_clear (updated_nodes);
2077 if (dump_file)
2079 fprintf (dump_file,
2080 " Inlined into %s which now has time %i and size %i,"
2081 "net change of %+i.\n",
2082 edge->caller->name (),
2083 inline_summaries->get (edge->caller)->time,
2084 inline_summaries->get (edge->caller)->size,
2085 overall_size - old_size);
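/* overall_size can shrink when inlining eliminates offline copies; track
   the smallest unit size seen and re-derive the growth limit from it.  */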
2087 if (min_size > overall_size)
2089 min_size = overall_size;
2090 max_size = compute_max_insns (min_size);
2092 if (dump_file)
2093 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
2097 free_growth_caches ();
2098 if (dump_file)
2099 fprintf (dump_file,
2100 "Unit growth for small function inlining: %i->%i (%i%%)\n",
2101 initial_size, overall_size,
2102 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
2103 BITMAP_FREE (updated_nodes);
2104 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
2107 /* Flatten NODE. Performed both during early inlining and
2108 at IPA inlining time. */
2110 static void
2111 flatten_function (struct cgraph_node *node, bool early)
2113 struct cgraph_edge *e;
2115 /* We shouldn't be called recursively when we are being processed. */
2116 gcc_assert (node->aux == NULL);
2118 node->aux = (void *) node;
2120 for (e = node->callees; e; e = e->next_callee)
2122 struct cgraph_node *orig_callee;
2123 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2125 /* Have we hit a cycle? Then it is time to give up. */
2126 if (callee->aux)
2128 if (dump_file)
2129 fprintf (dump_file,
2130 "Not inlining %s into %s to avoid cycle.\n",
2131 xstrdup_for_dump (callee->name ()),
2132 xstrdup_for_dump (e->caller->name ()));
2133 e->inline_failed = CIF_RECURSIVE_INLINING;
2134 continue;
2137 /* When the edge is already inlined, we just need to recurse into
2138 it in order to fully flatten the leaves. */
2139 if (!e->inline_failed)
2141 flatten_function (callee, early);
2142 continue;
2145 /* The flatten attribute needs to be processed during late inlining. For
2146 extra code quality, however, we also do flattening during early
2147 optimization. */
2148 if (!early
2149 ? !can_inline_edge_p (e, true)
2150 : !can_early_inline_edge_p (e))
2151 continue;
2153 if (e->recursive_p ())
2155 if (dump_file)
2156 fprintf (dump_file, "Not inlining: recursive call.\n");
2157 continue;
2160 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2161 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2163 if (dump_file)
2164 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
2165 continue;
2168 /* Inline the edge and flatten the inline clone. Avoid
2169 recursing through the original node if the node was cloned. */
2170 if (dump_file)
2171 fprintf (dump_file, " Inlining %s into %s.\n",
2172 xstrdup_for_dump (callee->name ()),
2173 xstrdup_for_dump (e->caller->name ()));
2174 orig_callee = callee;
2175 inline_call (e, true, NULL, NULL, false);
2176 if (e->callee != orig_callee)
2177 orig_callee->aux = (void *) node;
2178 flatten_function (e->callee, early);
2179 if (e->callee != orig_callee)
2180 orig_callee->aux = NULL;
2183 node->aux = NULL;
2184 if (!node->global.inlined_to)
2185 inline_update_overall_summary (node);
2188 /* Inline NODE into all callers. Worker for cgraph_for_node_and_aliases.
2189 DATA points to the number of calls originally found, so we can avoid
2190 infinite recursion. */
2192 static bool
2193 inline_to_all_callers (struct cgraph_node *node, void *data)
2195 int *num_calls = (int *)data;
2196 bool callee_removed = false;
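/* Keep inlining NODE into its first remaining caller until no callers
   remain, an uninlinable or recursive call is found, or more calls than
   originally counted show up.  */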
2198 while (node->callers && !node->global.inlined_to)
2200 struct cgraph_node *caller = node->callers->caller;
2202 if (!can_inline_edge_p (node->callers, true)
2203 || node->callers->recursive_p ())
2205 if (dump_file)
2206 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2207 *num_calls = 0;
2208 return false;
2211 if (dump_file)
2213 fprintf (dump_file,
2214 "\nInlining %s size %i.\n",
2215 node->name (),
2216 inline_summaries->get (node)->size);
2217 fprintf (dump_file,
2218 " Called once from %s %i insns.\n",
2219 node->callers->caller->name (),
2220 inline_summaries->get (node->callers->caller)->size);
2223 inline_call (node->callers, true, NULL, NULL, true, &callee_removed);
2224 if (dump_file)
2225 fprintf (dump_file,
2226 " Inlined into %s which now has %i size\n",
2227 caller->name (),
2228 inline_summaries->get (caller)->size);
2229 if (!(*num_calls)--)
2231 if (dump_file)
2232 fprintf (dump_file, "New calls found; giving up.\n");
2233 return callee_removed;
2235 if (callee_removed)
2236 return true;
2238 return false;
2241 /* Output overall time estimate. */
2242 static void
2243 dump_overall_stats (void)
2245 int64_t sum_weighted = 0, sum = 0;
2246 struct cgraph_node *node;
2248 FOR_EACH_DEFINED_FUNCTION (node)
2249 if (!node->global.inlined_to
2250 && !node->alias)
2252 int time = inline_summaries->get (node)->time;
2253 sum += time;
2254 sum_weighted += time * node->count;
2256 fprintf (dump_file, "Overall time estimate: "
2257 "%" PRId64" weighted by profile: "
2258 "%" PRId64"\n", sum, sum_weighted);
2261 /* Output some useful stats about inlining. */
2263 static void
2264 dump_inline_stats (void)
2266 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2267 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2268 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2269 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2270 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2271 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2272 int64_t reason[CIF_N_REASONS][3];
2273 int i;
2274 struct cgraph_node *node;
2276 memset (reason, 0, sizeof (reason));
2277 FOR_EACH_DEFINED_FUNCTION (node)
2279 struct cgraph_edge *e;
2280 for (e = node->callees; e; e = e->next_callee)
2282 if (e->inline_failed)
2284 reason[(int) e->inline_failed][0] += e->count;
2285 reason[(int) e->inline_failed][1] += e->frequency;
2286 reason[(int) e->inline_failed][2] ++;
2287 if (DECL_VIRTUAL_P (e->callee->decl))
2289 if (e->indirect_inlining_edge)
2290 noninlined_virt_indir_cnt += e->count;
2291 else
2292 noninlined_virt_cnt += e->count;
2294 else
2296 if (e->indirect_inlining_edge)
2297 noninlined_indir_cnt += e->count;
2298 else
2299 noninlined_cnt += e->count;
2302 else
2304 if (e->speculative)
2306 if (DECL_VIRTUAL_P (e->callee->decl))
2307 inlined_speculative_ply += e->count;
2308 else
2309 inlined_speculative += e->count;
2311 else if (DECL_VIRTUAL_P (e->callee->decl))
2313 if (e->indirect_inlining_edge)
2314 inlined_virt_indir_cnt += e->count;
2315 else
2316 inlined_virt_cnt += e->count;
2318 else
2320 if (e->indirect_inlining_edge)
2321 inlined_indir_cnt += e->count;
2322 else
2323 inlined_cnt += e->count;
2327 for (e = node->indirect_calls; e; e = e->next_callee)
2328 if (e->indirect_info->polymorphic)
2329 indirect_poly_cnt += e->count;
2330 else
2331 indirect_cnt += e->count;
2333 if (max_count)
2335 fprintf (dump_file,
2336 "Inlined %" PRId64 " + speculative "
2337 "%" PRId64 " + speculative polymorphic "
2338 "%" PRId64 " + previously indirect "
2339 "%" PRId64 " + virtual "
2340 "%" PRId64 " + virtual and previously indirect "
2341 "%" PRId64 "\n" "Not inlined "
2342 "%" PRId64 " + previously indirect "
2343 "%" PRId64 " + virtual "
2344 "%" PRId64 " + virtual and previously indirect "
2345 "%" PRId64 " + stil indirect "
2346 "%" PRId64 " + still indirect polymorphic "
2347 "%" PRId64 "\n", inlined_cnt,
2348 inlined_speculative, inlined_speculative_ply,
2349 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2350 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2351 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2352 fprintf (dump_file,
2353 "Removed speculations %" PRId64 "\n",
2354 spec_rem);
2356 dump_overall_stats ();
2357 fprintf (dump_file, "\nWhy inlining failed?\n");
2358 for (i = 0; i < CIF_N_REASONS; i++)
2359 if (reason[i][2])
2360 fprintf (dump_file, "%-50s: %8i calls, %8i freq, %" PRId64" count\n",
2361 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2362 (int) reason[i][2], (int) reason[i][1], reason[i][0]);
2365 /* Decide on the inlining. We do so in topological order to avoid
2366 the expense of updating data structures. */
2368 static unsigned int
2369 ipa_inline (void)
2371 struct cgraph_node *node;
2372 int nnodes;
2373 struct cgraph_node **order;
2374 int i;
2375 int cold;
2376 bool remove_functions = false;
2378 if (!optimize)
2379 return 0;
2381 cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
2382 percent_rec = (sreal) 1 / (sreal) 100;
2384 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2386 if (in_lto_p && optimize)
2387 ipa_update_after_lto_read ();
2389 if (dump_file)
2390 dump_inline_summaries (dump_file);
2392 nnodes = ipa_reverse_postorder (order);
2394 FOR_EACH_FUNCTION (node)
2396 node->aux = 0;
2398 /* Recompute the default reasons for inlining because they may have
2399 changed during merging. */
2400 if (in_lto_p)
2402 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2404 gcc_assert (e->inline_failed);
2405 initialize_inline_failed (e);
2407 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2408 initialize_inline_failed (e);
2412 if (dump_file)
2413 fprintf (dump_file, "\nFlattening functions:\n");
2415 /* In the first pass, handle functions to be flattened. Do this with
2416 priority so none of our later choices can make this impossible. */
2417 for (i = nnodes - 1; i >= 0; i--)
2419 node = order[i];
2421 /* Handle nodes to be flattened.
2422 Ideally, when processing callees we would stop inlining at the
2423 entry of cycles, possibly cloning that entry point and
2424 trying to flatten it, turning it into a self-recursive
2425 function. */
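/* For illustration (hypothetical user code, not part of GCC):
     void __attribute__ ((flatten)) driver (void) { step1 (); step2 (); }
   All calls reachable from driver are then inlined into it where possible.  */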
2426 if (lookup_attribute ("flatten",
2427 DECL_ATTRIBUTES (node->decl)) != NULL)
2429 if (dump_file)
2430 fprintf (dump_file,
2431 "Flattening %s\n", node->name ());
2432 flatten_function (node, false);
2435 if (dump_file)
2436 dump_overall_stats ();
2438 inline_small_functions ();
2440 gcc_assert (symtab->state == IPA_SSA);
2441 symtab->state = IPA_SSA_AFTER_INLINING;
2442 /* Do the first after-inlining removal. We want to remove all "stale" extern
2443 inline functions and virtual functions so we really know what is called
2444 once. */
2445 symtab->remove_unreachable_nodes (dump_file);
2446 free (order);
2448 /* Inline functions with the property that, after inlining into all callers,
2449 the code size will shrink because the out-of-line copy is eliminated.
2450 We do this regardless of the callee size as long as the function growth
2451 limits are met. */
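/* For example, a static helper called from two places can be inlined into
   both callers; the offline copy then becomes unreachable and is removed,
   which often reduces the total size for small helpers.  */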
2452 if (dump_file)
2453 fprintf (dump_file,
2454 "\nDeciding on functions to be inlined into all callers and "
2455 "removing useless speculations:\n");
2457 /* Inlining one function called once has a good chance of preventing
2458 inlining of another function into the same callee. Ideally we should
2459 work in priority order, but inlining hot functions first is probably
2460 a good cut without the extra pain of maintaining the queue.
2462 ??? This does not really fit the bill perfectly: inlining a function
2463 into its caller often leads to better optimization of the callee due to
2464 increased context for optimization.
2465 For example, if the main() function calls a function that outputs help
2466 and then a function that does the main optimization, we should inline
2467 the second with priority even if both calls are cold by themselves.
2469 We probably want to implement a new predicate replacing our use of
2470 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2471 to be hot. */
2472 for (cold = 0; cold <= 1; cold ++)
2474 FOR_EACH_DEFINED_FUNCTION (node)
2476 struct cgraph_edge *edge, *next;
2477 bool update=false;
2479 for (edge = node->callees; edge; edge = next)
2481 next = edge->next_callee;
2482 if (edge->speculative && !speculation_useful_p (edge, false))
2484 edge->resolve_speculation ();
2485 spec_rem += edge->count;
2486 update = true;
2487 remove_functions = true;
2490 if (update)
2492 struct cgraph_node *where = node->global.inlined_to
2493 ? node->global.inlined_to : node;
2494 reset_edge_caches (where);
2495 inline_update_overall_summary (where);
2497 if (want_inline_function_to_all_callers_p (node, cold))
2499 int num_calls = 0;
2500 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2501 true);
2502 while (node->call_for_symbol_and_aliases
2503 (inline_to_all_callers, &num_calls, true))
2505 remove_functions = true;
2510 /* Free ipa-prop structures if they are no longer needed. */
2511 if (optimize)
2512 ipa_free_all_structures_after_iinln ();
2514 if (dump_file)
2516 fprintf (dump_file,
2517 "\nInlined %i calls, eliminated %i functions\n\n",
2518 ncalls_inlined, nfunctions_inlined);
2519 dump_inline_stats ();
2522 if (dump_file)
2523 dump_inline_summaries (dump_file);
2524 /* In WPA we use inline summaries for partitioning process. */
2525 if (!flag_wpa)
2526 inline_free_summary ();
2527 return remove_functions ? TODO_remove_functions : 0;
2530 /* Inline always-inline function calls in NODE. */
2532 static bool
2533 inline_always_inline_functions (struct cgraph_node *node)
2535 struct cgraph_edge *e;
2536 bool inlined = false;
2538 for (e = node->callees; e; e = e->next_callee)
2540 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2541 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2542 continue;
2544 if (e->recursive_p ())
2546 if (dump_file)
2547 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2548 e->callee->name ());
2549 e->inline_failed = CIF_RECURSIVE_INLINING;
2550 continue;
2553 if (!can_early_inline_edge_p (e))
2555 /* Set inlined to true if the callee is marked "always_inline" but
2556 is not inlinable. This will allow flagging an error later in
2557 expand_call_inline in tree-inline.c. */
2558 if (lookup_attribute ("always_inline",
2559 DECL_ATTRIBUTES (callee->decl)) != NULL)
2560 inlined = true;
2561 continue;
2564 if (dump_file)
2565 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2566 xstrdup_for_dump (e->callee->name ()),
2567 xstrdup_for_dump (e->caller->name ()));
2568 inline_call (e, true, NULL, NULL, false);
2569 inlined = true;
2571 if (inlined)
2572 inline_update_overall_summary (node);
2574 return inlined;
2577 /* Decide on early inlining of small functions called by NODE.
2578 Return true if anything was inlined. */
2580 static bool
2581 early_inline_small_functions (struct cgraph_node *node)
2583 struct cgraph_edge *e;
2584 bool inlined = false;
2586 for (e = node->callees; e; e = e->next_callee)
2588 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2589 if (!inline_summaries->get (callee)->inlinable
2590 || !e->inline_failed)
2591 continue;
2593 /* Do not consider functions not declared inline unless -finline-functions or -finline-small-functions is in effect. */
2594 if (!DECL_DECLARED_INLINE_P (callee->decl)
2595 && !opt_for_fn (node->decl, flag_inline_small_functions)
2596 && !opt_for_fn (node->decl, flag_inline_functions))
2597 continue;
2599 if (dump_file)
2600 fprintf (dump_file, "Considering inline candidate %s.\n",
2601 callee->name ());
2603 if (!can_early_inline_edge_p (e))
2604 continue;
2606 if (e->recursive_p ())
2608 if (dump_file)
2609 fprintf (dump_file, " Not inlining: recursive call.\n");
2610 continue;
2613 if (!want_early_inline_function_p (e))
2614 continue;
2616 if (dump_file)
2617 fprintf (dump_file, " Inlining %s into %s.\n",
2618 xstrdup_for_dump (callee->name ()),
2619 xstrdup_for_dump (e->caller->name ()));
2620 inline_call (e, true, NULL, NULL, true);
2621 inlined = true;
2624 return inlined;
2627 unsigned int
2628 early_inliner (function *fun)
2630 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2631 struct cgraph_edge *edge;
2632 unsigned int todo = 0;
2633 int iterations = 0;
2634 bool inlined = false;
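/* Overview of the steps below: always-inline callees are inlined first;
   then, unless early inlining is disabled, the function is either fully
   flattened (flatten attribute) or small callees are inlined iteratively,
   re-running optimize_inline_calls between iterations.  */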
2636 if (seen_error ())
2637 return 0;
2639 /* Do nothing if the data structures for the IPA inliner are already computed.
2640 This happens when some pass decides to construct a new function and
2641 cgraph_add_new_function calls the lowering passes and early optimization on
2642 it. This may confuse us when the early inliner decides to inline a call to
2643 a function clone, because function clones don't have a parameter list in
2644 ipa-prop matching their signature. */
2645 if (ipa_node_params_sum)
2646 return 0;
2648 #ifdef ENABLE_CHECKING
2649 node->verify ();
2650 #endif
2651 node->remove_all_references ();
2653 /* Rebuild this reference because it doesn't depend on the
2654 function's body and it is required to pass cgraph_node
2655 verification. */
2656 if (node->instrumented_version
2657 && !node->instrumentation_clone)
2658 node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
2660 /* Even when not optimizing or not inlining, inline always-inline
2661 functions. */
2662 inlined = inline_always_inline_functions (node);
2664 if (!optimize
2665 || flag_no_inline
2666 || !flag_early_inlining
2667 /* Never inline regular functions into always-inline functions
2668 during incremental inlining. This is unfortunate because functions
2669 calling always-inline functions will get less optimized, but at the
2670 same time inlining functions that call an always-inline
2671 function into an always-inline function might introduce
2672 cycles of edges to be always inlined in the callgraph.
2674 We might want to be smarter and just avoid this type of inlining. */
2675 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2676 && lookup_attribute ("always_inline",
2677 DECL_ATTRIBUTES (node->decl))))
2679 else if (lookup_attribute ("flatten",
2680 DECL_ATTRIBUTES (node->decl)) != NULL)
2682 /* When the function is marked to be flattened, recursively inline
2683 all calls in it. */
2684 if (dump_file)
2685 fprintf (dump_file,
2686 "Flattening %s\n", node->name ());
2687 flatten_function (node, true);
2688 inlined = true;
2690 else
2692 /* If some always_inline functions were inlined, apply the changes.
2693 This way we will not account always-inline functions in the growth limits
2694 and, moreover, we will inline calls from always-inline bodies that we
2695 previously skipped because of the conditional above. */
2696 if (inlined)
2698 timevar_push (TV_INTEGRATION);
2699 todo |= optimize_inline_calls (current_function_decl);
2700 /* optimize_inline_calls call above might have introduced new
2701 statements that don't have inline parameters computed. */
2702 for (edge = node->callees; edge; edge = edge->next_callee)
2704 if (inline_edge_summary_vec.length () > (unsigned) edge->uid)
2706 struct inline_edge_summary *es = inline_edge_summary (edge);
2707 es->call_stmt_size
2708 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2709 es->call_stmt_time
2710 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2713 inline_update_overall_summary (node);
2714 inlined = false;
2715 timevar_pop (TV_INTEGRATION);
2717 /* We iterate incremental inlining to get trivial cases of indirect
2718 inlining. */
2719 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2720 && early_inline_small_functions (node))
2722 timevar_push (TV_INTEGRATION);
2723 todo |= optimize_inline_calls (current_function_decl);
2725 /* Technically we ought to recompute inline parameters so the new
2726 iteration of the early inliner works as expected. However, the
2727 values are approximately right, and thus we only need to update the
2728 edge info that might be cleared out for newly discovered edges. */
2729 for (edge = node->callees; edge; edge = edge->next_callee)
2731 /* We have no summary for new bound store calls yet. */
2732 if (inline_edge_summary_vec.length () > (unsigned)edge->uid)
2734 struct inline_edge_summary *es = inline_edge_summary (edge);
2735 es->call_stmt_size
2736 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2737 es->call_stmt_time
2738 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2740 if (edge->callee->decl
2741 && !gimple_check_call_matching_types (
2742 edge->call_stmt, edge->callee->decl, false))
2743 edge->call_stmt_cannot_inline_p = true;
2745 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2746 inline_update_overall_summary (node);
2747 timevar_pop (TV_INTEGRATION);
2748 iterations++;
2749 inlined = false;
2751 if (dump_file)
2752 fprintf (dump_file, "Iterations: %i\n", iterations);
2755 if (inlined)
2757 timevar_push (TV_INTEGRATION);
2758 todo |= optimize_inline_calls (current_function_decl);
2759 timevar_pop (TV_INTEGRATION);
2762 fun->always_inline_functions_inlined = true;
2764 return todo;
2767 /* Do inlining of small functions. Doing so early helps profiling and other
2768 passes to be somewhat more effective and avoids some code duplication in
2769 the later real inlining pass for testcases with very many function calls. */
2771 namespace {
2773 const pass_data pass_data_early_inline =
2775 GIMPLE_PASS, /* type */
2776 "einline", /* name */
2777 OPTGROUP_INLINE, /* optinfo_flags */
2778 TV_EARLY_INLINING, /* tv_id */
2779 PROP_ssa, /* properties_required */
2780 0, /* properties_provided */
2781 0, /* properties_destroyed */
2782 0, /* todo_flags_start */
2783 0, /* todo_flags_finish */
2786 class pass_early_inline : public gimple_opt_pass
2788 public:
2789 pass_early_inline (gcc::context *ctxt)
2790 : gimple_opt_pass (pass_data_early_inline, ctxt)
2793 /* opt_pass methods: */
2794 virtual unsigned int execute (function *);
2796 }; // class pass_early_inline
2798 unsigned int
2799 pass_early_inline::execute (function *fun)
2801 return early_inliner (fun);
2804 } // anon namespace
2806 gimple_opt_pass *
2807 make_pass_early_inline (gcc::context *ctxt)
2809 return new pass_early_inline (ctxt);
2812 namespace {
2814 const pass_data pass_data_ipa_inline =
2816 IPA_PASS, /* type */
2817 "inline", /* name */
2818 OPTGROUP_INLINE, /* optinfo_flags */
2819 TV_IPA_INLINING, /* tv_id */
2820 0, /* properties_required */
2821 0, /* properties_provided */
2822 0, /* properties_destroyed */
2823 0, /* todo_flags_start */
2824 ( TODO_dump_symtab ), /* todo_flags_finish */
2827 class pass_ipa_inline : public ipa_opt_pass_d
2829 public:
2830 pass_ipa_inline (gcc::context *ctxt)
2831 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2832 inline_generate_summary, /* generate_summary */
2833 inline_write_summary, /* write_summary */
2834 inline_read_summary, /* read_summary */
2835 NULL, /* write_optimization_summary */
2836 NULL, /* read_optimization_summary */
2837 NULL, /* stmt_fixup */
2838 0, /* function_transform_todo_flags_start */
2839 inline_transform, /* function_transform */
2840 NULL) /* variable_transform */
2843 /* opt_pass methods: */
2844 virtual unsigned int execute (function *) { return ipa_inline (); }
2846 }; // class pass_ipa_inline
2848 } // anon namespace
2850 ipa_opt_pass_d *
2851 make_pass_ipa_inline (gcc::context *ctxt)
2853 return new pass_ipa_inline (ctxt);