gcc/ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Inlining decision heuristics
23    The implementation of the inliner is organized as follows:
25 inlining heuristics limits
27      can_inline_edge_p allows checking that a particular inlining is allowed
28      by the limits specified by the user (allowed function growth, stack usage
29      growth and so on).
31    Functions are inlined when it is obvious the result is profitable (such
32    as functions called once or when inlining reduces code size).
33 In addition to that we perform inlining of small functions and recursive
34 inlining.
36 inlining heuristics
38 The inliner itself is split into two passes:
40 pass_early_inlining
42 Simple local inlining pass inlining callees into current function.
43      This pass makes no use of whole unit analysis and thus it can make only
44      very simple decisions based on local properties.
46 The strength of the pass is that it is run in topological order
47 (reverse postorder) on the callgraph. Functions are converted into SSA
48 form just before this pass and optimized subsequently. As a result, the
49      callees of the function seen by the early inliner have already been
50      optimized, and the results of early inlining add a lot of optimization
51      opportunities for the local optimization.
53      The pass handles the obvious inlining decisions within the compilation
54 unit - inlining auto inline functions, inlining for size and
55 flattening.
57      The main strength of the pass is the ability to eliminate the abstraction
58      penalty in C++ code (via a combination of inlining and early
59      optimization) and thus improve the quality of analysis done by the real IPA
60      optimizers.
62      Because of the lack of whole unit knowledge, the pass cannot really make
63      good code size/performance tradeoffs. It however does very simple
64      speculative inlining allowing code size to grow by
65      EARLY_INLINING_INSNS when the callee is a leaf function. In this case the
66      optimizations performed later are very likely to eliminate the cost.
68 pass_ipa_inline
70 This is the real inliner able to handle inlining with whole program
71      knowledge. It performs the following steps:
73      1) inlining of small functions. This is implemented by a greedy
74      algorithm ordering all inlinable cgraph edges by their badness and
75      inlining them in this order as long as the inline limits allow doing so.
77      This heuristic is not very good at inlining recursive calls. Recursive
78      calls can be inlined with results similar to loop unrolling. To do so,
79      a special purpose recursive inliner is executed on the function when a
80      recursive edge is met as a viable candidate.
82      2) Unreachable functions are removed from the callgraph. Inlining leads
83      to devirtualization and other modifications of the callgraph, so functions
84 may become unreachable during the process. Also functions declared as
85 extern inline or virtual functions are removed, since after inlining
86 we no longer need the offline bodies.
88 3) Functions called once and not exported from the unit are inlined.
89      This should almost always lead to a reduction of code size by eliminating
90      the need for an offline copy of the function. */
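/* Illustrative only (these declarations are made up and not used by this
   file): the heuristics described above react to source constructs roughly
   like the following, with the exact outcome depending on the limits and
   badness computations later in this file.

     static inline int add (int a, int b) { return a + b; }   // small function,
                                                               // early-inline candidate
     __attribute__ ((flatten)) void render_frame (void);      // callees flattened
                                                               // into this function
     static int helper (int x) { return add (x, 1); }         // called once and local:
                                                               // inlined, offline copy removed
   */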
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "backend.h"
96 #include "target.h"
97 #include "rtl.h"
98 #include "tree.h"
99 #include "gimple.h"
100 #include "alloc-pool.h"
101 #include "tree-pass.h"
102 #include "gimple-ssa.h"
103 #include "cgraph.h"
104 #include "lto-streamer.h"
105 #include "trans-mem.h"
106 #include "calls.h"
107 #include "tree-inline.h"
108 #include "params.h"
109 #include "profile.h"
110 #include "symbol-summary.h"
111 #include "tree-vrp.h"
112 #include "ipa-prop.h"
113 #include "ipa-inline.h"
114 #include "ipa-utils.h"
115 #include "sreal.h"
116 #include "auto-profile.h"
117 #include "builtins.h"
118 #include "fibonacci_heap.h"
120 typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
121 typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
123 /* Statistics we collect about inlining algorithm. */
124 static int overall_size;
125 static gcov_type max_count;
126 static gcov_type spec_rem;
128 /* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100. */
129 static sreal cgraph_freq_base_rec, percent_rec;
131 /* Return false when inlining edge E would lead to violating
132 limits on function unit growth or stack usage growth.
134 The relative function body growth limit is present generally
135 to avoid problems with non-linear behavior of the compiler.
136    To allow inlining huge functions into tiny wrappers, the limit
137 is always based on the bigger of the two functions considered.
139    For stack growth limits we always base the growth on the stack usage
140    of the callers. We want to prevent applications from segfaulting
141    on stack overflow when functions with huge stack frames get
142    inlined. */
144 static bool
145 caller_growth_limits (struct cgraph_edge *e)
147 struct cgraph_node *to = e->caller;
148 struct cgraph_node *what = e->callee->ultimate_alias_target ();
149 int newsize;
150 int limit = 0;
151 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
152 inline_summary *info, *what_info, *outer_info = inline_summaries->get (to);
154   /* Look for the function e->caller is inlined to. While doing
155      so work out the largest function body on the way. As
156      described above, we want to base our function growth
157      limits on that, not on the self size of the
158      outer function, nor on the self size of the inline code
159      we immediately inline into. This is the most relaxed
160      interpretation of the rule "do not grow large functions
161      too much in order to prevent the compiler from exploding". */
162 while (true)
164 info = inline_summaries->get (to);
165 if (limit < info->self_size)
166 limit = info->self_size;
167 if (stack_size_limit < info->estimated_self_stack_size)
168 stack_size_limit = info->estimated_self_stack_size;
169 if (to->global.inlined_to)
170 to = to->callers->caller;
171 else
172 break;
175 what_info = inline_summaries->get (what);
177 if (limit < what_info->self_size)
178 limit = what_info->self_size;
180 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
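  /* Illustrative arithmetic (parameter value chosen for the example, not
     necessarily the default): if the largest body on the path has
     self_size 1000 and --param large-function-growth is 100, the
     resulting limit is 1000 + 1000 * 100 / 100 = 2000 insns.  */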
182 /* Check the size after inlining against the function limits. But allow
183 the function to shrink if it went over the limits by forced inlining. */
184 newsize = estimate_size_after_inlining (to, e);
185 if (newsize >= info->size
186 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
187 && newsize > limit)
189 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
190 return false;
193 if (!what_info->estimated_stack_size)
194 return true;
196   /* FIXME: The stack size limit often prevents inlining in Fortran programs
197      due to the large I/O data structures used by the Fortran front end.
198      We ought to ignore this limit when we know that the edge is executed
199      on every invocation of the caller (i.e. its call statement dominates the
200      exit block). We do not track this information, yet. */
201 stack_size_limit += ((gcov_type)stack_size_limit
202 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
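  /* Illustrative arithmetic (example parameter value): with an estimated
     self stack size of 256 bytes and --param stack-frame-growth set to
     1000, the limit becomes 256 + 256 * 1000 / 100 = 2816 bytes.  */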
204 inlined_stack = (outer_info->stack_frame_offset
205 + outer_info->estimated_self_stack_size
206 + what_info->estimated_stack_size);
207 /* Check new stack consumption with stack consumption at the place
208 stack is used. */
209 if (inlined_stack > stack_size_limit
210       /* If the function already has large stack usage from a sibling
211          inline call, we can inline, too.
212          This bit overoptimistically assumes that we are good at stack
213          packing. */
214 && inlined_stack > info->estimated_stack_size
215 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
217 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
218 return false;
220 return true;
223 /* Dump info about why inlining has failed. */
225 static void
226 report_inline_failed_reason (struct cgraph_edge *e)
228 if (dump_file)
230 fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
231 xstrdup_for_dump (e->caller->name ()), e->caller->order,
232 xstrdup_for_dump (e->callee->name ()), e->callee->order,
233 cgraph_inline_failed_string (e->inline_failed));
234 if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
235 || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
236 && e->caller->lto_file_data
237 && e->callee->ultimate_alias_target ()->lto_file_data)
239 fprintf (dump_file, " LTO objects: %s, %s\n",
240 e->caller->lto_file_data->file_name,
241 e->callee->ultimate_alias_target ()->lto_file_data->file_name);
243 if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
244 cl_target_option_print_diff
245 (dump_file, 2, target_opts_for_fn (e->caller->decl),
246 target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
247 if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
248 cl_optimization_print_diff
249 (dump_file, 2, opts_for_fn (e->caller->decl),
250 opts_for_fn (e->callee->ultimate_alias_target ()->decl));
254 /* Decide whether sanitizer-related attributes allow inlining. */
256 static bool
257 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
259 /* Don't care if sanitizer is disabled */
260 if (!(flag_sanitize & SANITIZE_ADDRESS))
261 return true;
263 if (!caller || !callee)
264 return true;
266 return !!lookup_attribute ("no_sanitize_address",
267 DECL_ATTRIBUTES (caller)) ==
268 !!lookup_attribute ("no_sanitize_address",
269 DECL_ATTRIBUTES (callee));
272 /* Used for flags where it is safe to inline when caller's value is
273    greater than callee's. */
274 #define check_maybe_up(flag) \
275 (opts_for_fn (caller->decl)->x_##flag \
276 != opts_for_fn (callee->decl)->x_##flag \
277 && (!always_inline \
278 || opts_for_fn (caller->decl)->x_##flag \
279 < opts_for_fn (callee->decl)->x_##flag))
280 /* Used for flags where it is safe to inline when caller's value is
281 smaller than callee's. */
282 #define check_maybe_down(flag) \
283 (opts_for_fn (caller->decl)->x_##flag \
284 != opts_for_fn (callee->decl)->x_##flag \
285 && (!always_inline \
286 || opts_for_fn (caller->decl)->x_##flag \
287 > opts_for_fn (callee->decl)->x_##flag))
288 /* Used for flags where exact match is needed for correctness. */
289 #define check_match(flag) \
290 (opts_for_fn (caller->decl)->x_##flag \
291 != opts_for_fn (callee->decl)->x_##flag)
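/* Reading of the macros above, for illustration (a paraphrase, not extra
   semantics): check_maybe_up (flag) is nonzero when the two settings differ,
   except that for always_inline callees the difference is tolerated when the
   caller's value is already greater than or equal to the callee's;
   check_maybe_down is the mirror image, and check_match fires on any
   difference.  A nonzero result feeds the CIF_OPTIMIZATION_MISMATCH checks
   below.  */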
293 /* Decide if we can inline the edge and possibly update
294    the inline_failed reason.
295    We check whether inlining is possible at all and whether
296    caller growth limits allow doing so.
298    If REPORT is true, output the reason to the dump file.
300    If DISREGARD_LIMITS is true, ignore size limits. */
302 static bool
303 can_inline_edge_p (struct cgraph_edge *e, bool report,
304 bool disregard_limits = false, bool early = false)
306 gcc_checking_assert (e->inline_failed);
308 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
310 if (report)
311 report_inline_failed_reason (e);
312 return false;
315 bool inlinable = true;
316 enum availability avail;
317 cgraph_node *caller = e->caller->global.inlined_to
318 ? e->caller->global.inlined_to : e->caller;
319 cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
320 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
321 tree callee_tree
322 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
324 if (!callee->definition)
326 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
327 inlinable = false;
329 else if (callee->calls_comdat_local)
331 e->inline_failed = CIF_USES_COMDAT_LOCAL;
332 inlinable = false;
334 else if (avail <= AVAIL_INTERPOSABLE)
336 e->inline_failed = CIF_OVERWRITABLE;
337 inlinable = false;
339 /* All edges with call_stmt_cannot_inline_p should have inline_failed
340 initialized to one of FINAL_ERROR reasons. */
341 else if (e->call_stmt_cannot_inline_p)
342 gcc_unreachable ();
343 /* Don't inline if the functions have different EH personalities. */
344 else if (DECL_FUNCTION_PERSONALITY (caller->decl)
345 && DECL_FUNCTION_PERSONALITY (callee->decl)
346 && (DECL_FUNCTION_PERSONALITY (caller->decl)
347 != DECL_FUNCTION_PERSONALITY (callee->decl)))
349 e->inline_failed = CIF_EH_PERSONALITY;
350 inlinable = false;
352 /* TM pure functions should not be inlined into non-TM_pure
353 functions. */
354 else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
356 e->inline_failed = CIF_UNSPECIFIED;
357 inlinable = false;
359 /* Check compatibility of target optimization options. */
360 else if (!targetm.target_option.can_inline_p (caller->decl,
361 callee->decl))
363 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
364 inlinable = false;
366 else if (!inline_summaries->get (callee)->inlinable)
368 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
369 inlinable = false;
371 else if (inline_summaries->get (caller)->contains_cilk_spawn)
373 e->inline_failed = CIF_CILK_SPAWN;
374 inlinable = false;
376 /* Don't inline a function with mismatched sanitization attributes. */
377 else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
379 e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
380 inlinable = false;
382 /* Check if caller growth allows the inlining. */
383 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
384 && !disregard_limits
385 && !lookup_attribute ("flatten",
386 DECL_ATTRIBUTES (caller->decl))
387 && !caller_growth_limits (e))
388 inlinable = false;
389 /* Don't inline a function with a higher optimization level than the
390      caller. FIXME: this is really just the tip of the iceberg of handling
391      the optimization attribute. */
392 else if (caller_tree != callee_tree)
394 bool always_inline =
395 (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
396 && lookup_attribute ("always_inline",
397 DECL_ATTRIBUTES (callee->decl)));
398 inline_summary *caller_info = inline_summaries->get (caller);
399 inline_summary *callee_info = inline_summaries->get (callee);
401       /* Until GCC 4.9 we did not check the semantics-altering flags
402          below and inlined across the optimization boundary.
403          Enabling the checks below breaks several packages by refusing
404          to inline library always_inline functions. See PR65873.
405          Disable the check for early inlining for now until a better solution
406          is found. */
407 if (always_inline && early)
409 /* There are some options that change IL semantics which means
410          we cannot inline in these cases for correctness reasons.
411 Not even for always_inline declared functions. */
412 /* Strictly speaking only when the callee contains signed integer
413 math where overflow is undefined. */
414 else if ((check_maybe_up (flag_strict_overflow)
415             /* This flag is set by optimize. Allow inlining across the
416                optimize boundary. */
417 && (!opt_for_fn (caller->decl, optimize)
418 == !opt_for_fn (callee->decl, optimize) || !always_inline))
419 || check_match (flag_wrapv)
420 || check_match (flag_trapv)
421          /* When the caller or callee does FP math, be sure the FP codegen flags
422             are compatible. */
423 || ((caller_info->fp_expressions && callee_info->fp_expressions)
424 && (check_maybe_up (flag_rounding_math)
425 || check_maybe_up (flag_trapping_math)
426 || check_maybe_down (flag_unsafe_math_optimizations)
427 || check_maybe_down (flag_finite_math_only)
428 || check_maybe_up (flag_signaling_nans)
429 || check_maybe_down (flag_cx_limited_range)
430 || check_maybe_up (flag_signed_zeros)
431 || check_maybe_down (flag_associative_math)
432 || check_maybe_down (flag_reciprocal_math)
433 || check_maybe_down (flag_fp_int_builtin_inexact)
434 /* Strictly speaking only when the callee contains function
435 calls that may end up setting errno. */
436 || check_maybe_up (flag_errno_math)))
437          /* We do not want code compiled with exceptions to be
438 brought into a non-EH function unless we know that the callee
439 does not throw.
440 This is tracked by DECL_FUNCTION_PERSONALITY. */
441 || (check_maybe_up (flag_non_call_exceptions)
442 && DECL_FUNCTION_PERSONALITY (callee->decl))
443 || (check_maybe_up (flag_exceptions)
444 && DECL_FUNCTION_PERSONALITY (callee->decl))
445          /* When devirtualization is disabled for the callee, it is not safe
446             to inline it as we have possibly mangled the type info.
447 Allow early inlining of always inlines. */
448 || (!early && check_maybe_down (flag_devirtualize)))
450 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
451 inlinable = false;
453 /* gcc.dg/pr43564.c. Apply user-forced inline even at -O0. */
454 else if (always_inline)
456      /* When the user added an attribute to the callee, honor it. */
457 else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
458 && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
460 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
461 inlinable = false;
463      /* If explicit optimize attributes are not used, the mismatch is caused
464         by different command line options used to build different units.
465         Do not care about COMDAT functions - those are intended to be
466         optimized with the optimization flags of the module they are used in.
467 Also do not care about mixing up size/speed optimization when
468 DECL_DISREGARD_INLINE_LIMITS is set. */
469 else if ((callee->merged_comdat
470 && !lookup_attribute ("optimize",
471 DECL_ATTRIBUTES (caller->decl)))
472 || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
474      /* If the mismatch is caused by merging two LTO units with different
475         optimization flags we want to be a bit nicer. However, never inline
476         if one of the functions is not optimized at all. */
477 else if (!opt_for_fn (callee->decl, optimize)
478 || !opt_for_fn (caller->decl, optimize))
480 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
481 inlinable = false;
483      /* If the callee is optimized for size and the caller is not, allow inlining if
484         code shrinks or we are within the MAX_INLINE_INSNS_SINGLE limit and the callee
485         is inline (and thus likely a unified comdat). This will allow the caller
486         to run faster. */
487 else if (opt_for_fn (callee->decl, optimize_size)
488 > opt_for_fn (caller->decl, optimize_size))
490 int growth = estimate_edge_growth (e);
491 if (growth > 0
492 && (!DECL_DECLARED_INLINE_P (callee->decl)
493 && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
494 MAX_INLINE_INSNS_AUTO)))
496 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
497 inlinable = false;
500      /* If the callee is more aggressively optimized for performance than the caller,
501         we generally want to inline only cheap (runtime-wise) functions. */
502 else if (opt_for_fn (callee->decl, optimize_size)
503 < opt_for_fn (caller->decl, optimize_size)
504 || (opt_for_fn (callee->decl, optimize)
505 > opt_for_fn (caller->decl, optimize)))
507 if (estimate_edge_time (e)
508 >= 20 + inline_edge_summary (e)->call_stmt_time)
510 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
511 inlinable = false;
517 if (!inlinable && report)
518 report_inline_failed_reason (e);
519 return inlinable;
523 /* Return true if the edge E is inlinable during early inlining. */
525 static bool
526 can_early_inline_edge_p (struct cgraph_edge *e)
528 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
529   /* The early inliner might get called at WPA stage when an IPA pass adds a new
530      function. In this case we cannot really do any early inlining
531      because function bodies are missing. */
532 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
533 return false;
534 if (!gimple_has_body_p (callee->decl))
536 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
537 return false;
539   /* In the early inliner some of the callees may not be in SSA form yet
540      (i.e. the callgraph is cyclic and we did not process
541      the callee with the early inliner yet). We don't have a CIF code for this
542      case; later we will re-do the decision in the real inliner. */
543 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
544 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
546 if (dump_file)
547 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
548 return false;
550 if (!can_inline_edge_p (e, true, false, true))
551 return false;
552 return true;
556 /* Return number of calls in N. Ignore cheap builtins. */
558 static int
559 num_calls (struct cgraph_node *n)
561 struct cgraph_edge *e;
562 int num = 0;
564 for (e = n->callees; e; e = e->next_callee)
565 if (!is_inexpensive_builtin (e->callee->decl))
566 num++;
567 return num;
571 /* Return true if we are interested in inlining the small function. */
573 static bool
574 want_early_inline_function_p (struct cgraph_edge *e)
576 bool want_inline = true;
577 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
579 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
581 /* For AutoFDO, we need to make sure that before profile summary, all
582 hot paths' IR look exactly the same as profiled binary. As a result,
583 in einliner, we will disregard size limit and inline those callsites
584 that are:
585 * inlined in the profiled binary, and
586 * the cloned callee has enough samples to be considered "hot". */
587 else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
589 else if (!DECL_DECLARED_INLINE_P (callee->decl)
590 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
592 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
593 report_inline_failed_reason (e);
594 want_inline = false;
596 else
598 int growth = estimate_edge_growth (e);
599 int n;
601 if (growth <= 0)
603 else if (!e->maybe_hot_p ()
604 && growth > 0)
606 if (dump_file)
607 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
608 "call is cold and code would grow by %i\n",
609 xstrdup_for_dump (e->caller->name ()),
610 e->caller->order,
611 xstrdup_for_dump (callee->name ()), callee->order,
612 growth);
613 want_inline = false;
615 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
617 if (dump_file)
618 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
619 "growth %i exceeds --param early-inlining-insns\n",
620 xstrdup_for_dump (e->caller->name ()),
621 e->caller->order,
622 xstrdup_for_dump (callee->name ()), callee->order,
623 growth);
624 want_inline = false;
626 else if ((n = num_calls (callee)) != 0
627 && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
629 if (dump_file)
630 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
631 "growth %i exceeds --param early-inlining-insns "
632 "divided by number of calls\n",
633 xstrdup_for_dump (e->caller->name ()),
634 e->caller->order,
635 xstrdup_for_dump (callee->name ()), callee->order,
636 growth);
637 want_inline = false;
640 return want_inline;
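/* Illustrative arithmetic for the checks above (example parameter value,
   not necessarily the default): with --param early-inlining-insns set to 12
   and a callee that itself makes 3 non-builtin calls, early inlining is
   rejected once the estimated growth exceeds 12 / (3 + 1) = 3 insns.  */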
643 /* Compute time of the edge->caller + edge->callee execution when inlining
644 does not happen. */
646 inline sreal
647 compute_uninlined_call_time (struct inline_summary *callee_info,
648 struct cgraph_edge *edge)
650 sreal uninlined_call_time = (sreal)callee_info->time;
651 cgraph_node *caller = (edge->caller->global.inlined_to
652 ? edge->caller->global.inlined_to
653 : edge->caller);
655 if (edge->count && caller->count)
656 uninlined_call_time *= (sreal)edge->count / caller->count;
657 if (edge->frequency)
658 uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
659 else
660 uninlined_call_time = uninlined_call_time >> 11;
662 int caller_time = inline_summaries->get (caller)->time;
663 return uninlined_call_time + caller_time;
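/* Roughly, the scaling above means that an edge whose frequency is twice
   CGRAPH_FREQ_BASE (i.e. executed on average twice per caller invocation)
   contributes the callee's time twice on top of the caller's own time,
   while a zero-frequency edge contributes only a tiny (>> 11) fraction.  */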
666 /* Same as compute_uninlined_call_time but compute the time when inlining
667 does happen. */
669 inline sreal
670 compute_inlined_call_time (struct cgraph_edge *edge,
671 int edge_time)
673 cgraph_node *caller = (edge->caller->global.inlined_to
674 ? edge->caller->global.inlined_to
675 : edge->caller);
676 int caller_time = inline_summaries->get (caller)->time;
677 sreal time = edge_time;
679 if (edge->count && caller->count)
680 time *= (sreal)edge->count / caller->count;
681 if (edge->frequency)
682 time *= cgraph_freq_base_rec * edge->frequency;
683 else
684 time = time >> 11;
686 /* This calculation should match one in ipa-inline-analysis.
687 FIXME: Once ipa-inline-analysis is converted to sreal this can be
688 simplified. */
689 time -= (sreal) ((gcov_type) edge->frequency
690 * inline_edge_summary (edge)->call_stmt_time
691 * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE)) / INLINE_TIME_SCALE;
692 time += caller_time;
693 if (time <= 0)
694 time = ((sreal) 1) >> 8;
695 gcc_checking_assert (time >= 0);
696 return time;
699 /* Return true if the speedup for inlining E is bigger than
700 PARAM_MAX_INLINE_MIN_SPEEDUP. */
702 static bool
703 big_speedup_p (struct cgraph_edge *e)
705 sreal time = compute_uninlined_call_time (inline_summaries->get (e->callee),
707 sreal inlined_time = compute_inlined_call_time (e, estimate_edge_time (e));
709 if (time - inlined_time
710 > (sreal) time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)
711 * percent_rec)
712 return true;
713 return false;
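/* Worked example of the test above (parameter value chosen for illustration):
   with --param inline-min-speedup set to 10, an uninlined estimate of 200
   time units and an inlined estimate of 150 give a saving of
   50 > 200 * 10% = 20, so the speedup counts as big.  */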
716 /* Return true if we are interested in inlining small function.
717 When REPORT is true, report reason to dump file. */
719 static bool
720 want_inline_small_function_p (struct cgraph_edge *e, bool report)
722 bool want_inline = true;
723 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
725 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
727 else if (!DECL_DECLARED_INLINE_P (callee->decl)
728 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
730 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
731 want_inline = false;
733   /* Do a fast and conservative check whether the function can be a good
734      inline candidate. At the moment we allow inline hints to
735 promote non-inline functions to inline and we increase
736 MAX_INLINE_INSNS_SINGLE 16-fold for inline functions. */
737 else if ((!DECL_DECLARED_INLINE_P (callee->decl)
738 && (!e->count || !e->maybe_hot_p ()))
739 && inline_summaries->get (callee)->min_size
740 - inline_edge_summary (e)->call_stmt_size
741 > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
743 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
744 want_inline = false;
746 else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
747 && inline_summaries->get (callee)->min_size
748 - inline_edge_summary (e)->call_stmt_size
749 > 16 * MAX_INLINE_INSNS_SINGLE)
751 e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
752 ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
753 : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
754 want_inline = false;
756 else
758 int growth = estimate_edge_growth (e);
759 inline_hints hints = estimate_edge_hints (e);
760 bool big_speedup = big_speedup_p (e);
762 if (growth <= 0)
764       /* Apply the MAX_INLINE_INSNS_SINGLE limit. Do not do so when
765          hints suggest that inlining the given function is very profitable. */
766 else if (DECL_DECLARED_INLINE_P (callee->decl)
767 && growth >= MAX_INLINE_INSNS_SINGLE
768 && ((!big_speedup
769 && !(hints & (INLINE_HINT_indirect_call
770 | INLINE_HINT_known_hot
771 | INLINE_HINT_loop_iterations
772 | INLINE_HINT_array_index
773 | INLINE_HINT_loop_stride)))
774 || growth >= MAX_INLINE_INSNS_SINGLE * 16))
776 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
777 want_inline = false;
779 else if (!DECL_DECLARED_INLINE_P (callee->decl)
780 && !opt_for_fn (e->caller->decl, flag_inline_functions))
782 /* growth_likely_positive is expensive, always test it last. */
783 if (growth >= MAX_INLINE_INSNS_SINGLE
784 || growth_likely_positive (callee, growth))
786 e->inline_failed = CIF_NOT_DECLARED_INLINED;
787 want_inline = false;
790       /* Apply the MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
791          Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
792          inlining the given function is very profitable. */
793 else if (!DECL_DECLARED_INLINE_P (callee->decl)
794 && !big_speedup
795 && !(hints & INLINE_HINT_known_hot)
796 && growth >= ((hints & (INLINE_HINT_indirect_call
797 | INLINE_HINT_loop_iterations
798 | INLINE_HINT_array_index
799 | INLINE_HINT_loop_stride))
800 ? MAX (MAX_INLINE_INSNS_AUTO,
801 MAX_INLINE_INSNS_SINGLE)
802 : MAX_INLINE_INSNS_AUTO))
804 /* growth_likely_positive is expensive, always test it last. */
805 if (growth >= MAX_INLINE_INSNS_SINGLE
806 || growth_likely_positive (callee, growth))
808 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
809 want_inline = false;
812 /* If call is cold, do not inline when function body would grow. */
813 else if (!e->maybe_hot_p ()
814 && (growth >= MAX_INLINE_INSNS_SINGLE
815 || growth_likely_positive (callee, growth)))
817 e->inline_failed = CIF_UNLIKELY_CALL;
818 want_inline = false;
821 if (!want_inline && report)
822 report_inline_failed_reason (e);
823 return want_inline;
826 /* EDGE is a self-recursive edge.
827    We handle two cases - when function A is inlined into itself
828    or when function A is being inlined into another inlined copy of function
829    A within function B.
831    In the first case OUTER_NODE points to the toplevel copy of A, while
832    in the second case OUTER_NODE points to the outermost copy of A in B.
834    In both cases we want to be extra selective since
835    inlining the call will just cause new recursive calls to appear. */
837 static bool
838 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
839 struct cgraph_node *outer_node,
840 bool peeling,
841 int depth)
843 char const *reason = NULL;
844 bool want_inline = true;
845 int caller_freq = CGRAPH_FREQ_BASE;
846 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
848 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
849 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
851 if (!edge->maybe_hot_p ())
853 reason = "recursive call is cold";
854 want_inline = false;
856 else if (max_count && !outer_node->count)
858 reason = "not executed in profile";
859 want_inline = false;
861 else if (depth > max_depth)
863 reason = "--param max-inline-recursive-depth exceeded.";
864 want_inline = false;
867 if (outer_node->global.inlined_to)
868 caller_freq = outer_node->callers->frequency;
870 if (!caller_freq)
872 reason = "function is inlined and unlikely";
873 want_inline = false;
876 if (!want_inline)
878   /* Inlining of a self-recursive function into a copy of itself within another function
879      is a transformation similar to loop peeling.
881      Peeling is profitable if we can inline enough copies to make the probability
882      of an actual call to the self-recursive function very small. Be sure that
883      the probability of recursion is small.
885      We ensure that the frequency of recursing is at most 1 - (1/max_depth).
886      This way the expected number of recursions is at most max_depth. */
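  /* (Sketch of the reasoning: if each call recurses with probability p, the
     expected length of the call chain is 1/(1-p); bounding p by
     1 - 1/max_depth therefore bounds the expectation by max_depth.)  */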
887 else if (peeling)
889 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
890 / max_depth);
891 int i;
892 for (i = 1; i < depth; i++)
893 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
894 if (max_count
895 && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
896 >= max_prob))
898 reason = "profile of recursive call is too large";
899 want_inline = false;
901 if (!max_count
902 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
903 >= max_prob))
905 reason = "frequency of recursive call is too large";
906 want_inline = false;
909   /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the recursion
910      depth is large. We reduce function call overhead and increase the chance that
911      things fit in the hardware return predictor.
913      Recursive inlining might however increase the cost of stack frame setup,
914      actually slowing down functions whose recursion tree is wide rather than
915      deep.
917      Deciding reliably on when to do recursive inlining without profile feedback
918      is tricky. For now we disable recursive inlining when the probability of self
919      recursion is low.
921      Recursive inlining of a self-recursive call within a loop also results in large loop
922      depths that generally optimize badly. We may want to throttle down inlining
923      in those cases. In particular this seems to happen in one of the libstdc++ rb tree
924      methods. */
925 else
927 if (max_count
928 && (edge->count * 100 / outer_node->count
929 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
931 reason = "profile of recursive call is too small";
932 want_inline = false;
934 else if (!max_count
935 && (edge->frequency * 100 / caller_freq
936 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
938 reason = "frequency of recursive call is too small";
939 want_inline = false;
942 if (!want_inline && dump_file)
943 fprintf (dump_file, " not inlining recursively: %s\n", reason);
944 return want_inline;
947 /* Return true when NODE has an uninlinable caller;
948    set HAS_HOT_CALL if it has a hot call.
949 Worker for cgraph_for_node_and_aliases. */
951 static bool
952 check_callers (struct cgraph_node *node, void *has_hot_call)
954 struct cgraph_edge *e;
955 for (e = node->callers; e; e = e->next_caller)
957 if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
958 return true;
959 if (!can_inline_edge_p (e, true))
960 return true;
961 if (e->recursive_p ())
962 return true;
963 if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
964 *(bool *)has_hot_call = true;
966 return false;
969 /* If NODE has a caller, return true. */
971 static bool
972 has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
974 if (node->callers)
975 return true;
976 return false;
979 /* Decide if inlining NODE would reduce unit size by eliminating
980 the offline copy of function.
981 When COLD is true the cold calls are considered, too. */
983 static bool
984 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
986 bool has_hot_call = false;
988   /* Aliases get inlined along with the function they alias. */
989 if (node->alias)
990 return false;
991 /* Already inlined? */
992 if (node->global.inlined_to)
993 return false;
994 /* Does it have callers? */
995 if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
996 return false;
997 /* Inlining into all callers would increase size? */
998 if (estimate_growth (node) > 0)
999 return false;
1000 /* All inlines must be possible. */
1001 if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
1002 true))
1003 return false;
1004 if (!cold && !has_hot_call)
1005 return false;
1006 return true;
1009 /* A cost model driving the inlining heuristics in a way such that the edges with
1010    the smallest badness are inlined first. After each inlining is performed
1011    the costs of all caller edges of the affected nodes are recomputed so the
1012    metrics may accurately depend on values such as the number of inlinable callers
1013    of the function or the function body size. */
1015 static sreal
1016 edge_badness (struct cgraph_edge *edge, bool dump)
1018 sreal badness;
1019 int growth, edge_time;
1020 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1021 struct inline_summary *callee_info = inline_summaries->get (callee);
1022 inline_hints hints;
1023 cgraph_node *caller = (edge->caller->global.inlined_to
1024 ? edge->caller->global.inlined_to
1025 : edge->caller);
1027 growth = estimate_edge_growth (edge);
1028 edge_time = estimate_edge_time (edge);
1029 hints = estimate_edge_hints (edge);
1030 gcc_checking_assert (edge_time >= 0);
1031 gcc_checking_assert (edge_time <= callee_info->time);
1032 gcc_checking_assert (growth <= callee_info->size);
1034 if (dump)
1036 fprintf (dump_file, " Badness calculation for %s/%i -> %s/%i\n",
1037 xstrdup_for_dump (edge->caller->name ()),
1038 edge->caller->order,
1039 xstrdup_for_dump (callee->name ()),
1040 edge->callee->order);
1041 fprintf (dump_file, " size growth %i, time %i ",
1042 growth,
1043 edge_time);
1044 dump_inline_hints (dump_file, hints);
1045 if (big_speedup_p (edge))
1046 fprintf (dump_file, " big_speedup");
1047 fprintf (dump_file, "\n");
1050 /* Always prefer inlining saving code size. */
1051 if (growth <= 0)
1053 badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
1054 if (dump)
1055 fprintf (dump_file, " %f: Growth %d <= 0\n", badness.to_double (),
1056 growth);
1058 /* Inlining into EXTERNAL functions is not going to change anything unless
1059 they are themselves inlined. */
1060 else if (DECL_EXTERNAL (caller->decl))
1062 if (dump)
1063 fprintf (dump_file, " max: function is external\n");
1064 return sreal::max ();
1066   /* When a profile is available, compute badness as:
1068                 time_saved * caller_count
1069      goodness = -------------------------------------------------
1070                 growth_of_caller * overall_growth * combined_size
1072      badness = - goodness
1074      Again use a negative value to make calls with a profile appear hotter
1075      than calls without. */
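  /* Illustrative numbers for the formula above (made up, and ignoring the
     wrapper-penalty and squaring adjustments applied below): time_saved 2,
     caller_count 1000, growth_of_caller 10, overall_growth 50 and
     combined_size 200 give goodness = 2 * 1000 / (10 * 50 * 200) = 0.02,
     hence badness = -0.02; larger savings or smaller growth make the edge
     more attractive (a more negative badness sorts earlier in the heap).  */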
1077 else if (opt_for_fn (caller->decl, flag_guess_branch_prob) || caller->count)
1079 sreal numerator, denominator;
1080 int overall_growth;
1082 numerator = (compute_uninlined_call_time (callee_info, edge)
1083 - compute_inlined_call_time (edge, edge_time));
1084 if (numerator == 0)
1085 numerator = ((sreal) 1 >> 8);
1086 if (caller->count)
1087 numerator *= caller->count;
1088 else if (opt_for_fn (caller->decl, flag_branch_probabilities))
1089 numerator = numerator >> 11;
1090 denominator = growth;
1092 overall_growth = callee_info->growth;
1094 /* Look for inliner wrappers of the form:
1096 inline_caller ()
1098 do_fast_job...
1099 if (need_more_work)
1100 noninline_callee ();
1102         Withhout penalizing this case, we usually inline noninline_callee
1103         into the inline_caller because overall_growth is small, preventing
1104         further inlining of inline_caller.
1106 Penalize only callgraph edges to functions with small overall
1107 growth ...
1109 if (growth > overall_growth
1110 /* ... and having only one caller which is not inlined ... */
1111 && callee_info->single_caller
1112 && !edge->caller->global.inlined_to
1113 /* ... and edges executed only conditionally ... */
1114 && edge->frequency < CGRAPH_FREQ_BASE
1115 /* ... consider case where callee is not inline but caller is ... */
1116 && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
1117 && DECL_DECLARED_INLINE_P (caller->decl))
1118 /* ... or when early optimizers decided to split and edge
1119 frequency still indicates splitting is a win ... */
1120 || (callee->split_part && !caller->split_part
1121 && edge->frequency
1122 < CGRAPH_FREQ_BASE
1123 * PARAM_VALUE
1124 (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100
1125 /* ... and do not overwrite user specified hints. */
1126 && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
1127 || DECL_DECLARED_INLINE_P (caller->decl)))))
1129 struct inline_summary *caller_info = inline_summaries->get (caller);
1130 int caller_growth = caller_info->growth;
1132           /* Only apply the penalty when the caller looks like an inline candidate,
1133              and it is not called once. */
1134 if (!caller_info->single_caller && overall_growth < caller_growth
1135 && caller_info->inlinable
1136 && caller_info->size
1137 < (DECL_DECLARED_INLINE_P (caller->decl)
1138 ? MAX_INLINE_INSNS_SINGLE : MAX_INLINE_INSNS_AUTO))
1140 if (dump)
1141 fprintf (dump_file,
1142 " Wrapper penalty. Increasing growth %i to %i\n",
1143 overall_growth, caller_growth);
1144 overall_growth = caller_growth;
1147 if (overall_growth > 0)
1149           /* Strongly prefer functions with few callers that can be inlined
1150              fully. The squaring here leads to smaller binaries on average.
1151              Watch however for extreme cases and return to a linear function
1152              when growth is large. */
1153 if (overall_growth < 256)
1154 overall_growth *= overall_growth;
1155 else
1156 overall_growth += 256 * 256 - 256;
1157 denominator *= overall_growth;
1159 denominator *= inline_summaries->get (caller)->self_size + growth;
1161 badness = - numerator / denominator;
1163 if (dump)
1165 fprintf (dump_file,
1166 " %f: guessed profile. frequency %f, count %" PRId64
1167 " caller count %" PRId64
1168 " time w/o inlining %f, time w/ inlining %f"
1169 " overall growth %i (current) %i (original)"
1170 " %i (compensated)\n",
1171 badness.to_double (),
1172 (double)edge->frequency / CGRAPH_FREQ_BASE,
1173 edge->count, caller->count,
1174 compute_uninlined_call_time (callee_info, edge).to_double (),
1175 compute_inlined_call_time (edge, edge_time).to_double (),
1176 estimate_growth (callee),
1177 callee_info->growth, overall_growth);
1180   /* When the function local profile is not available or it does not give
1181      useful information (i.e. the frequency is zero), base the cost on the
1182      loop nest and overall size growth, so we optimize for the overall number
1183      of functions fully inlined in the program. */
1184 else
1186 int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
1187 badness = growth;
1189 /* Decrease badness if call is nested. */
1190 if (badness > 0)
1191 badness = badness >> nest;
1192 else
1193 badness = badness << nest;
1194 if (dump)
1195 fprintf (dump_file, " %f: no profile. nest %i\n",
1196 badness.to_double (), nest);
1198 gcc_checking_assert (badness != 0);
1200 if (edge->recursive_p ())
1201 badness = badness.shift (badness > 0 ? 4 : -4);
1202 if ((hints & (INLINE_HINT_indirect_call
1203 | INLINE_HINT_loop_iterations
1204 | INLINE_HINT_array_index
1205 | INLINE_HINT_loop_stride))
1206 || callee_info->growth <= 0)
1207 badness = badness.shift (badness > 0 ? -2 : 2);
1208 if (hints & (INLINE_HINT_same_scc))
1209 badness = badness.shift (badness > 0 ? 3 : -3);
1210 else if (hints & (INLINE_HINT_in_scc))
1211 badness = badness.shift (badness > 0 ? 2 : -2);
1212 else if (hints & (INLINE_HINT_cross_module))
1213 badness = badness.shift (badness > 0 ? 1 : -1);
1214 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1215 badness = badness.shift (badness > 0 ? -4 : 4);
1216 else if ((hints & INLINE_HINT_declared_inline))
1217 badness = badness.shift (badness > 0 ? -3 : 3);
1218 if (dump)
1219 fprintf (dump_file, " Adjusted by hints %f\n", badness.to_double ());
1220 return badness;
1223 /* Recompute badness of EDGE and update its key in HEAP if needed. */
1224 static inline void
1225 update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
1227 sreal badness = edge_badness (edge, false);
1228 if (edge->aux)
1230 edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
1231 gcc_checking_assert (n->get_data () == edge);
1233 /* fibonacci_heap::replace_key does busy updating of the
1234          heap that is unnecessarily expensive.
1235          We do lazy increases: after extracting the minimum, if the key
1236          turns out to be out of date, it is re-inserted into the heap
1237          with the correct value. */
1238 if (badness < n->get_key ())
1240 if (dump_file && (dump_flags & TDF_DETAILS))
1242 fprintf (dump_file,
1243 " decreasing badness %s/%i -> %s/%i, %f"
1244 " to %f\n",
1245 xstrdup_for_dump (edge->caller->name ()),
1246 edge->caller->order,
1247 xstrdup_for_dump (edge->callee->name ()),
1248 edge->callee->order,
1249 n->get_key ().to_double (),
1250 badness.to_double ());
1252 heap->decrease_key (n, badness);
1255 else
1257 if (dump_file && (dump_flags & TDF_DETAILS))
1259 fprintf (dump_file,
1260 " enqueuing call %s/%i -> %s/%i, badness %f\n",
1261 xstrdup_for_dump (edge->caller->name ()),
1262 edge->caller->order,
1263 xstrdup_for_dump (edge->callee->name ()),
1264 edge->callee->order,
1265 badness.to_double ());
1267 edge->aux = heap->insert (badness, edge);
1272 /* NODE was inlined.
1273    All caller edges need to be reset because
1274    size estimates change. Similarly callees need to be reset
1275    because better context may be known. */
1277 static void
1278 reset_edge_caches (struct cgraph_node *node)
1280 struct cgraph_edge *edge;
1281 struct cgraph_edge *e = node->callees;
1282 struct cgraph_node *where = node;
1283 struct ipa_ref *ref;
1285 if (where->global.inlined_to)
1286 where = where->global.inlined_to;
1288 for (edge = where->callers; edge; edge = edge->next_caller)
1289 if (edge->inline_failed)
1290 reset_edge_growth_cache (edge);
1292 FOR_EACH_ALIAS (where, ref)
1293 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
1295 if (!e)
1296 return;
1298 while (true)
1299 if (!e->inline_failed && e->callee->callees)
1300 e = e->callee->callees;
1301 else
1303 if (e->inline_failed)
1304 reset_edge_growth_cache (e);
1305 if (e->next_callee)
1306 e = e->next_callee;
1307 else
1311 if (e->caller == node)
1312 return;
1313 e = e->caller->callers;
1315 while (!e->next_callee);
1316 e = e->next_callee;
1321 /* Recompute HEAP nodes for each caller of NODE.
1322    UPDATED_NODES tracks nodes we have already visited, to avoid redundant work.
1323    When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
1324 it is inlinable. Otherwise check all edges. */
1326 static void
1327 update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
1328 bitmap updated_nodes,
1329 struct cgraph_edge *check_inlinablity_for)
1331 struct cgraph_edge *edge;
1332 struct ipa_ref *ref;
1334 if ((!node->alias && !inline_summaries->get (node)->inlinable)
1335 || node->global.inlined_to)
1336 return;
1337 if (!bitmap_set_bit (updated_nodes, node->uid))
1338 return;
1340 FOR_EACH_ALIAS (node, ref)
1342 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1343 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1346 for (edge = node->callers; edge; edge = edge->next_caller)
1347 if (edge->inline_failed)
1349 if (!check_inlinablity_for
1350 || check_inlinablity_for == edge)
1352 if (can_inline_edge_p (edge, false)
1353 && want_inline_small_function_p (edge, false))
1354 update_edge_key (heap, edge);
1355 else if (edge->aux)
1357 report_inline_failed_reason (edge);
1358 heap->delete_node ((edge_heap_node_t *) edge->aux);
1359 edge->aux = NULL;
1362 else if (edge->aux)
1363 update_edge_key (heap, edge);
1367 /* Recompute HEAP nodes for each uninlined call in NODE.
1368    This is used when we know that edge badnesses are only going to increase
1369    (we introduced a new call site) and thus all we need is to insert newly
1370    created edges into the heap. */
1372 static void
1373 update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
1374 bitmap updated_nodes)
1376 struct cgraph_edge *e = node->callees;
1378 if (!e)
1379 return;
1380 while (true)
1381 if (!e->inline_failed && e->callee->callees)
1382 e = e->callee->callees;
1383 else
1385 enum availability avail;
1386 struct cgraph_node *callee;
1387         /* We do not reset the callee growth cache here. Since we added a new call,
1388            growth could only have increased and consequently the badness metric
1389            doesn't need updating. */
1390 if (e->inline_failed
1391 && (callee = e->callee->ultimate_alias_target (&avail, e->caller))
1392 && inline_summaries->get (callee)->inlinable
1393 && avail >= AVAIL_AVAILABLE
1394 && !bitmap_bit_p (updated_nodes, callee->uid))
1396 if (can_inline_edge_p (e, false)
1397 && want_inline_small_function_p (e, false))
1398 update_edge_key (heap, e);
1399 else if (e->aux)
1401 report_inline_failed_reason (e);
1402 heap->delete_node ((edge_heap_node_t *) e->aux);
1403 e->aux = NULL;
1406 if (e->next_callee)
1407 e = e->next_callee;
1408 else
1412 if (e->caller == node)
1413 return;
1414 e = e->caller->callers;
1416 while (!e->next_callee);
1417 e = e->next_callee;
1422 /* Enqueue all recursive calls from NODE into priority queue depending on
1423 how likely we want to recursively inline the call. */
1425 static void
1426 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1427 edge_heap_t *heap)
1429 struct cgraph_edge *e;
1430 enum availability avail;
1432 for (e = where->callees; e; e = e->next_callee)
1433 if (e->callee == node
1434 || (e->callee->ultimate_alias_target (&avail, e->caller) == node
1435 && avail > AVAIL_INTERPOSABLE))
1437 /* When profile feedback is available, prioritize by expected number
1438 of calls. */
1439 heap->insert (!max_count ? -e->frequency
1440 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
1443 for (e = where->callees; e; e = e->next_callee)
1444 if (!e->inline_failed)
1445 lookup_recursive_calls (node, e->callee, heap);
1448 /* Decide on recursive inlining: in case the function has recursive calls,
1449    inline until the body size reaches the given limit. If any new indirect edges
1450 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1451 is NULL. */
1453 static bool
1454 recursive_inlining (struct cgraph_edge *edge,
1455 vec<cgraph_edge *> *new_edges)
1457 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1458 edge_heap_t heap (sreal::min ());
1459 struct cgraph_node *node;
1460 struct cgraph_edge *e;
1461 struct cgraph_node *master_clone = NULL, *next;
1462 int depth = 0;
1463 int n = 0;
1465 node = edge->caller;
1466 if (node->global.inlined_to)
1467 node = node->global.inlined_to;
1469 if (DECL_DECLARED_INLINE_P (node->decl))
1470 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1472   /* Make sure that the function is small enough to be considered for inlining. */
1473 if (estimate_size_after_inlining (node, edge) >= limit)
1474 return false;
1475 lookup_recursive_calls (node, node, &heap);
1476 if (heap.empty ())
1477 return false;
1479 if (dump_file)
1480 fprintf (dump_file,
1481 " Performing recursive inlining on %s\n",
1482 node->name ());
1484   /* Do the inlining and update the list of recursive calls during the process. */
1485 while (!heap.empty ())
1487 struct cgraph_edge *curr = heap.extract_min ();
1488 struct cgraph_node *cnode, *dest = curr->callee;
1490 if (!can_inline_edge_p (curr, true))
1491 continue;
1493       /* MASTER_CLONE is produced in the case we have already started modifying
1494          the function. Be sure to redirect the edge to the original body before
1495          estimating growths, otherwise we will be seeing growths after inlining
1496          into the already modified body. */
1497 if (master_clone)
1499 curr->redirect_callee (master_clone);
1500 reset_edge_growth_cache (curr);
1503 if (estimate_size_after_inlining (node, curr) > limit)
1505 curr->redirect_callee (dest);
1506 reset_edge_growth_cache (curr);
1507 break;
1510 depth = 1;
1511 for (cnode = curr->caller;
1512 cnode->global.inlined_to; cnode = cnode->callers->caller)
1513 if (node->decl
1514 == curr->callee->ultimate_alias_target ()->decl)
1515 depth++;
1517 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1519 curr->redirect_callee (dest);
1520 reset_edge_growth_cache (curr);
1521 continue;
1524 if (dump_file)
1526 fprintf (dump_file,
1527 " Inlining call of depth %i", depth);
1528 if (node->count)
1530 fprintf (dump_file, " called approx. %.2f times per call",
1531 (double)curr->count / node->count);
1533 fprintf (dump_file, "\n");
1535 if (!master_clone)
1537 /* We need original clone to copy around. */
1538 master_clone = node->create_clone (node->decl, node->count,
1539 CGRAPH_FREQ_BASE, false, vNULL,
1540 true, NULL, NULL);
1541 for (e = master_clone->callees; e; e = e->next_callee)
1542 if (!e->inline_failed)
1543 clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
1544 curr->redirect_callee (master_clone);
1545 reset_edge_growth_cache (curr);
1548 inline_call (curr, false, new_edges, &overall_size, true);
1549 lookup_recursive_calls (node, curr->callee, &heap);
1550 n++;
1553 if (!heap.empty () && dump_file)
1554 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1556 if (!master_clone)
1557 return false;
1559 if (dump_file)
1560 fprintf (dump_file,
1561 "\n Inlined %i times, "
1562 "body grown from size %i to %i, time %i to %i\n", n,
1563 inline_summaries->get (master_clone)->size, inline_summaries->get (node)->size,
1564 inline_summaries->get (master_clone)->time, inline_summaries->get (node)->time);
1566   /* Remove the master clone we used for inlining. We rely on the fact that clones
1567      inlined into the master clone get queued just before the master clone, so we don't
1568      need recursion. */
1569 for (node = symtab->first_function (); node != master_clone;
1570 node = next)
1572 next = symtab->next_function (node);
1573 if (node->global.inlined_to == master_clone)
1574 node->remove ();
1576 master_clone->remove ();
1577 return true;
1581 /* Given whole compilation unit estimate of INSNS, compute how large we can
1582 allow the unit to grow. */
1584 static int
1585 compute_max_insns (int insns)
1587 int max_insns = insns;
1588 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1589 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1591 return ((int64_t) max_insns
1592 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
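/* Worked example (parameter value chosen for illustration): for a unit
   currently estimated at 10000 insns and --param inline-unit-growth set to
   20, the unit may grow to 10000 * (100 + 20) / 100 = 12000 insns, assuming
   10000 is already above --param large-unit-insns.  */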
1596 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1598 static void
1599 add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
1601 while (new_edges.length () > 0)
1603 struct cgraph_edge *edge = new_edges.pop ();
1605 gcc_assert (!edge->aux);
1606 if (edge->inline_failed
1607 && can_inline_edge_p (edge, true)
1608 && want_inline_small_function_p (edge, true))
1609 edge->aux = heap->insert (edge_badness (edge, false), edge);
1613 /* Remove EDGE from the fibheap. */
1615 static void
1616 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1618 if (e->aux)
1620 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
1621 e->aux = NULL;
1625 /* Return true if speculation of edge E seems useful.
1626 If ANTICIPATE_INLINING is true, be conservative and hope that E
1627 may get inlined. */
1629 bool
1630 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1632 enum availability avail;
1633 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
1634 e->caller);
1635 struct cgraph_edge *direct, *indirect;
1636 struct ipa_ref *ref;
1638 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1640 if (!e->maybe_hot_p ())
1641 return false;
1643 /* See if IP optimizations found something potentially useful about the
1644 function. For now we look only for CONST/PURE flags. Almost everything
1645 else we propagate is useless. */
1646 if (avail >= AVAIL_AVAILABLE)
1648 int ecf_flags = flags_from_decl_or_type (target->decl);
1649 if (ecf_flags & ECF_CONST)
1651 e->speculative_call_info (direct, indirect, ref);
1652 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1653 return true;
1655 else if (ecf_flags & ECF_PURE)
1657 e->speculative_call_info (direct, indirect, ref);
1658 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1659 return true;
1662   /* If we did not manage to inline the function nor redirect
1663      it to an ipa-cp clone (those are recognized by having the local flag set),
1664      it is probably pointless to inline it unless the hardware is missing an
1665      indirect call predictor. */
1666 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1667 return false;
1668 /* For overwritable targets there is not much to do. */
1669 if (e->inline_failed && !can_inline_edge_p (e, false, true))
1670 return false;
1671 /* OK, speculation seems interesting. */
1672 return true;
1675 /* We know that EDGE is not going to be inlined.
1676 See if we can remove speculation. */
1678 static void
1679 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1681 if (edge->speculative && !speculation_useful_p (edge, false))
1683 struct cgraph_node *node = edge->caller;
1684 struct cgraph_node *where = node->global.inlined_to
1685 ? node->global.inlined_to : node;
1686 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1688 spec_rem += edge->count;
1689 edge->resolve_speculation ();
1690 reset_edge_caches (where);
1691 inline_update_overall_summary (where);
1692 update_caller_keys (edge_heap, where,
1693 updated_nodes, NULL);
1694 update_callee_keys (edge_heap, where,
1695 updated_nodes);
1696 BITMAP_FREE (updated_nodes);
1700 /* Return true if NODE should be accounted for in the overall size estimate.
1701    Skip all nodes optimized for size so we can measure the growth of the hot
1702    part of the program regardless of padding. */
1704 bool
1705 inline_account_function_p (struct cgraph_node *node)
1707 return (!DECL_EXTERNAL (node->decl)
1708 && !opt_for_fn (node->decl, optimize_size)
1709 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
1712 /* Count the number of callers of NODE and store it into DATA (which
1713    points to an int). Worker for cgraph_for_node_and_aliases. */
1715 static bool
1716 sum_callers (struct cgraph_node *node, void *data)
1718 struct cgraph_edge *e;
1719 int *num_calls = (int *)data;
1721 for (e = node->callers; e; e = e->next_caller)
1722 (*num_calls)++;
1723 return false;
1726 /* We use a greedy algorithm for inlining small functions:
1727    all inline candidates are put into a prioritized heap ordered by
1728    increasing badness.
1730    The inlining of small functions is bounded by the unit growth parameters. */
1732 static void
1733 inline_small_functions (void)
1735 struct cgraph_node *node;
1736 struct cgraph_edge *edge;
1737 edge_heap_t edge_heap (sreal::min ());
1738 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1739 int min_size, max_size;
1740 auto_vec<cgraph_edge *> new_indirect_edges;
1741 int initial_size = 0;
1742 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1743 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1744 new_indirect_edges.create (8);
1746 edge_removal_hook_holder
1747 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1749 /* Compute overall unit size and other global parameters used by badness
1750 metrics. */
1752 max_count = 0;
1753 ipa_reduced_postorder (order, true, true, NULL);
1754 free (order);
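/* The postorder array itself is not needed here; ipa_reduced_postorder is
   called for its side effect of storing SCC information (struct ipa_dfs_info)
   in each node's aux field, which is consumed below and later released by
   ipa_free_postorder_info.  */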
1756 FOR_EACH_DEFINED_FUNCTION (node)
1757 if (!node->global.inlined_to)
1759 if (!node->alias && node->analyzed
1760 && (node->has_gimple_body_p () || node->thunk.thunk_p))
1762 struct inline_summary *info = inline_summaries->get (node);
1763 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1765 /* Do not account external functions; they will be optimized out
1766 if not inlined. Also only count the non-cold portion of the program. */
1767 if (inline_account_function_p (node))
1768 initial_size += info->size;
1769 info->growth = estimate_growth (node);
1771 int num_calls = 0;
1772 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
1773 true);
1774 if (num_calls == 1)
1775 info->single_caller = true;
1776 if (dfs && dfs->next_cycle)
1778 struct cgraph_node *n2;
1779 int id = dfs->scc_no + 1;
1780 for (n2 = node; n2;
1781 n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
1783 struct inline_summary *info2 = inline_summaries->get (n2);
1784 if (info2->scc_no)
1785 break;
1786 info2->scc_no = id;
1791 for (edge = node->callers; edge; edge = edge->next_caller)
1792 if (max_count < edge->count)
1793 max_count = edge->count;
1795 ipa_free_postorder_info ();
1796 initialize_growth_caches ();
1798 if (dump_file)
1799 fprintf (dump_file,
1800 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1801 initial_size);
1803 overall_size = initial_size;
1804 max_size = compute_max_insns (overall_size);
1805 min_size = overall_size;
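/* min_size tracks the smallest overall unit size seen so far; max_size is
   the growth ceiling recomputed from it by compute_max_insns, so the unit
   growth limit follows the smallest size reached rather than a transient
   peak.  */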
1807 /* Populate the heap with all edges we might inline. */
1809 FOR_EACH_DEFINED_FUNCTION (node)
1811 bool update = false;
1812 struct cgraph_edge *next = NULL;
1813 bool has_speculative = false;
1815 if (dump_file)
1816 fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
1817 node->name (), node->order);
1819 for (edge = node->callees; edge; edge = next)
1821 next = edge->next_callee;
1822 if (edge->inline_failed
1823 && !edge->aux
1824 && can_inline_edge_p (edge, true)
1825 && want_inline_small_function_p (edge, true))
1828 gcc_assert (!edge->aux);
1829 update_edge_key (&edge_heap, edge);
1831 if (edge->speculative)
1832 has_speculative = true;
1834 if (has_speculative)
1835 for (edge = node->callees; edge; edge = next)
1836 if (edge->speculative && !speculation_useful_p (edge,
1837 edge->aux != NULL))
1839 edge->resolve_speculation ();
1840 update = true;
1842 if (update)
1844 struct cgraph_node *where = node->global.inlined_to
1845 ? node->global.inlined_to : node;
1846 inline_update_overall_summary (where);
1847 reset_edge_caches (where);
1848 update_caller_keys (&edge_heap, where,
1849 updated_nodes, NULL);
1850 update_callee_keys (&edge_heap, where,
1851 updated_nodes);
1852 bitmap_clear (updated_nodes);
1856 gcc_assert (in_lto_p
1857 || !max_count
1858 || (profile_info && flag_branch_probabilities));
1860 while (!edge_heap.empty ())
1862 int old_size = overall_size;
1863 struct cgraph_node *where, *callee;
1864 sreal badness = edge_heap.min_key ();
1865 sreal current_badness;
1866 int growth;
1868 edge = edge_heap.extract_min ();
1869 gcc_assert (edge->aux);
1870 edge->aux = NULL;
1871 if (!edge->inline_failed || !edge->callee->analyzed)
1872 continue;
1874 #if CHECKING_P
1875 /* Make sure that the caches are kept consistent. */
1876 sreal cached_badness = edge_badness (edge, false);
1878 int old_size_est = estimate_edge_size (edge);
1879 int old_time_est = estimate_edge_time (edge);
1880 int old_hints_est = estimate_edge_hints (edge);
1882 reset_edge_growth_cache (edge);
1883 gcc_assert (old_size_est == estimate_edge_size (edge));
1884 gcc_assert (old_time_est == estimate_edge_time (edge));
1885 /* FIXME:
1887 gcc_assert (old_hints_est == estimate_edge_hints (edge));
1889 fails with profile feedback because some hints depend on the
1890 maybe_hot_edge_p predicate and, because the callee gets inlined into
1891 other calls, the edge may become cold.
1892 This ought to be fixed by computing relative probabilities
1893 for the given invocation, but that will be better done once the whole
1894 code is converted to sreals. Disable for now and revert to the "wrong"
1895 value so the enable/disable checking paths agree. */
1896 edge_growth_cache[edge->uid].hints = old_hints_est + 1;
1898 /* When updating the edge costs, we only decrease badness in the keys.
1899 Increases of badness are handled lazily; when we see a key with an
1900 out-of-date value on it, we re-insert it now. */
1901 current_badness = edge_badness (edge, false);
1902 /* Disable checking for profile because roundoff errors may cause slight
1903 deviations in the order. */
1904 gcc_assert (max_count || cached_badness == current_badness);
1905 gcc_assert (current_badness >= badness);
1906 #else
1907 current_badness = edge_badness (edge, false);
1908 #endif
1909 if (current_badness != badness)
1911 if (edge_heap.min () && current_badness > edge_heap.min_key ())
1913 edge->aux = edge_heap.insert (current_badness, edge);
1914 continue;
1916 else
1917 badness = current_badness;
1920 if (!can_inline_edge_p (edge, true))
1922 resolve_noninline_speculation (&edge_heap, edge);
1923 continue;
1926 callee = edge->callee->ultimate_alias_target ();
1927 growth = estimate_edge_growth (edge);
1928 if (dump_file)
1930 fprintf (dump_file,
1931 "\nConsidering %s/%i with %i size\n",
1932 callee->name (), callee->order,
1933 inline_summaries->get (callee)->size);
1934 fprintf (dump_file,
1935 " to be inlined into %s/%i in %s:%i\n"
1936 " Estimated badness is %f, frequency %.2f.\n",
1937 edge->caller->name (), edge->caller->order,
1938 edge->call_stmt
1939 && (LOCATION_LOCUS (gimple_location ((const gimple *)
1940 edge->call_stmt))
1941 > BUILTINS_LOCATION)
1942 ? gimple_filename ((const gimple *) edge->call_stmt)
1943 : "unknown",
1944 edge->call_stmt
1945 ? gimple_lineno ((const gimple *) edge->call_stmt)
1946 : -1,
1947 badness.to_double (),
1948 edge->frequency / (double)CGRAPH_FREQ_BASE);
1949 if (edge->count)
1950 fprintf (dump_file," Called %" PRId64"x\n",
1951 edge->count);
1952 if (dump_flags & TDF_DETAILS)
1953 edge_badness (edge, true);
1956 if (overall_size + growth > max_size
1957 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1959 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1960 report_inline_failed_reason (edge);
1961 resolve_noninline_speculation (&edge_heap, edge);
1962 continue;
1965 if (!want_inline_small_function_p (edge, true))
1967 resolve_noninline_speculation (&edge_heap, edge);
1968 continue;
1971 /* Heuristics for inlining small functions work poorly for
1972 recursive calls, where the effect is similar to loop unrolling.
1973 When inlining such an edge seems profitable, leave the decision to
1974 the special-purpose recursive inliner. */
1975 if (edge->recursive_p ())
1977 where = edge->caller;
1978 if (where->global.inlined_to)
1979 where = where->global.inlined_to;
1980 if (!recursive_inlining (edge,
1981 opt_for_fn (edge->caller->decl,
1982 flag_indirect_inlining)
1983 ? &new_indirect_edges : NULL))
1985 edge->inline_failed = CIF_RECURSIVE_INLINING;
1986 resolve_noninline_speculation (&edge_heap, edge);
1987 continue;
1989 reset_edge_caches (where);
1990 /* The recursive inliner inlines all recursive calls of the function
1991 at once. Consequently we need to update all callee keys. */
1992 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
1993 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
1994 update_callee_keys (&edge_heap, where, updated_nodes);
1995 bitmap_clear (updated_nodes);
1997 else
1999 struct cgraph_node *outer_node = NULL;
2000 int depth = 0;
2002 /* Consider the case where the self-recursive function A is inlined
2003 into B. This is a desired optimization in some cases, since it
2004 leads to an effect similar to loop peeling and we might completely
2005 optimize out the recursive call. However, we must be extra
2006 selective. */
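/* As an illustration: if B calls the self-recursive function A, inlining
   A into B peels one iteration of A's recursion into B; inlining A again
   into that inline copy peels another, which is what the depth count below
   keeps track of.  */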
2008 where = edge->caller;
2009 while (where->global.inlined_to)
2011 if (where->decl == callee->decl)
2012 outer_node = where, depth++;
2013 where = where->callers->caller;
2015 if (outer_node
2016 && !want_inline_self_recursive_call_p (edge, outer_node,
2017 true, depth))
2019 edge->inline_failed
2020 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
2021 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
2022 resolve_noninline_speculation (&edge_heap, edge);
2023 continue;
2025 else if (depth && dump_file)
2026 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
2028 gcc_checking_assert (!callee->global.inlined_to);
2029 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
2030 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2032 reset_edge_caches (edge->callee);
2034 update_callee_keys (&edge_heap, where, updated_nodes);
2036 where = edge->caller;
2037 if (where->global.inlined_to)
2038 where = where->global.inlined_to;
2040 /* Our profitability metric can depend on local properties
2041 such as the number of inlinable calls and the size of the function body.
2042 After inlining these properties might change for the function we
2043 inlined into (since its body size changed) and for the functions
2044 called by the function we inlined (since the number of their inlinable
2045 callers might change). */
2046 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
2047 /* The offline copy count has possibly changed; recompute it if a
2048 profile is available. */
2049 if (max_count)
2051 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
2052 if (n != edge->callee && n->analyzed)
2053 update_callee_keys (&edge_heap, n, updated_nodes);
2055 bitmap_clear (updated_nodes);
2057 if (dump_file)
2059 fprintf (dump_file,
2060 " Inlined into %s which now has time %i and size %i,"
2061 "net change of %+i.\n",
2062 edge->caller->name (),
2063 inline_summaries->get (edge->caller)->time,
2064 inline_summaries->get (edge->caller)->size,
2065 overall_size - old_size);
2067 if (min_size > overall_size)
2069 min_size = overall_size;
2070 max_size = compute_max_insns (min_size);
2072 if (dump_file)
2073 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
2077 free_growth_caches ();
2078 if (dump_file)
2079 fprintf (dump_file,
2080 "Unit growth for small function inlining: %i->%i (%i%%)\n",
2081 initial_size, overall_size,
2082 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
2083 BITMAP_FREE (updated_nodes);
2084 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
2087 /* Flatten NODE. Performed both during early inlining and
2088 at IPA inlining time. */
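/* For example, a function defined as

       __attribute__ ((flatten)) void f (void) { ... }

   requests that every call inside f (and, transitively, inside the bodies
   inlined into it) be inlined whenever that is possible.  */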
2090 static void
2091 flatten_function (struct cgraph_node *node, bool early)
2093 struct cgraph_edge *e;
2095 /* We shouldn't be called recursively when we are being processed. */
2096 gcc_assert (node->aux == NULL);
2098 node->aux = (void *) node;
2100 for (e = node->callees; e; e = e->next_callee)
2102 struct cgraph_node *orig_callee;
2103 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2105 /* Have we hit a cycle? Then it is time to give up. */
2106 if (callee->aux)
2108 if (dump_file)
2109 fprintf (dump_file,
2110 "Not inlining %s into %s to avoid cycle.\n",
2111 xstrdup_for_dump (callee->name ()),
2112 xstrdup_for_dump (e->caller->name ()));
2113 e->inline_failed = CIF_RECURSIVE_INLINING;
2114 continue;
2117 /* When the edge is already inlined, we just need to recurse into
2118 it in order to fully flatten the leaves. */
2119 if (!e->inline_failed)
2121 flatten_function (callee, early);
2122 continue;
2125 /* The flatten attribute needs to be processed during late inlining. For
2126 extra code quality, however, we do flattening during early optimization
2127 too. */
2128 if (!early
2129 ? !can_inline_edge_p (e, true)
2130 : !can_early_inline_edge_p (e))
2131 continue;
2133 if (e->recursive_p ())
2135 if (dump_file)
2136 fprintf (dump_file, "Not inlining: recursive call.\n");
2137 continue;
2140 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2141 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2143 if (dump_file)
2144 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
2145 continue;
2148 /* Inline the edge and flatten the inline clone. Avoid
2149 recursing through the original node if the node was cloned. */
2150 if (dump_file)
2151 fprintf (dump_file, " Inlining %s into %s.\n",
2152 xstrdup_for_dump (callee->name ()),
2153 xstrdup_for_dump (e->caller->name ()));
2154 orig_callee = callee;
2155 inline_call (e, true, NULL, NULL, false);
2156 if (e->callee != orig_callee)
2157 orig_callee->aux = (void *) node;
2158 flatten_function (e->callee, early);
2159 if (e->callee != orig_callee)
2160 orig_callee->aux = NULL;
2163 node->aux = NULL;
2164 if (!node->global.inlined_to)
2165 inline_update_overall_summary (node);
2168 /* Inline NODE into all callers. Worker for cgraph_for_node_and_aliases.
2169 DATA points to the number of calls originally found, so that we avoid
2170 infinite recursion. */
2172 static bool
2173 inline_to_all_callers_1 (struct cgraph_node *node, void *data,
2174 hash_set<cgraph_node *> *callers)
2176 int *num_calls = (int *)data;
2177 bool callee_removed = false;
2179 while (node->callers && !node->global.inlined_to)
2181 struct cgraph_node *caller = node->callers->caller;
2183 if (!can_inline_edge_p (node->callers, true)
2184 || node->callers->recursive_p ())
2186 if (dump_file)
2187 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2188 *num_calls = 0;
2189 return false;
2192 if (dump_file)
2194 fprintf (dump_file,
2195 "\nInlining %s size %i.\n",
2196 node->name (),
2197 inline_summaries->get (node)->size);
2198 fprintf (dump_file,
2199 " Called once from %s %i insns.\n",
2200 node->callers->caller->name (),
2201 inline_summaries->get (node->callers->caller)->size);
2204 /* Remember which callers we inlined into, delaying the update of the
2205 overall summary. */
2206 callers->add (node->callers->caller);
2207 inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
2208 if (dump_file)
2209 fprintf (dump_file,
2210 " Inlined into %s which now has %i size\n",
2211 caller->name (),
2212 inline_summaries->get (caller)->size);
2213 if (!(*num_calls)--)
2215 if (dump_file)
2216 fprintf (dump_file, "New calls found; giving up.\n");
2217 return callee_removed;
2219 if (callee_removed)
2220 return true;
2222 return false;
2225 /* Wrapper around inline_to_all_callers_1 doing delayed overall summary
2226 update. */
2228 static bool
2229 inline_to_all_callers (struct cgraph_node *node, void *data)
2231 hash_set<cgraph_node *> callers;
2232 bool res = inline_to_all_callers_1 (node, data, &callers);
2233 /* Perform the delayed update of the overall summary of all callers
2234 processed. This avoids quadratic behavior in the cases where
2235 we have a lot of calls to the same function. */
2236 for (hash_set<cgraph_node *>::iterator i = callers.begin ();
2237 i != callers.end (); ++i)
2238 inline_update_overall_summary (*i);
2239 return res;
2242 /* Output overall time estimate. */
2243 static void
2244 dump_overall_stats (void)
2246 int64_t sum_weighted = 0, sum = 0;
2247 struct cgraph_node *node;
2249 FOR_EACH_DEFINED_FUNCTION (node)
2250 if (!node->global.inlined_to
2251 && !node->alias)
2253 int time = inline_summaries->get (node)->time;
2254 sum += time;
2255 sum_weighted += time * node->count;
2257 fprintf (dump_file, "Overall time estimate: "
2258 "%" PRId64" weighted by profile: "
2259 "%" PRId64"\n", sum, sum_weighted);
2262 /* Output some useful stats about inlining. */
2264 static void
2265 dump_inline_stats (void)
2267 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2268 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2269 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2270 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2271 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2272 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2273 int64_t reason[CIF_N_REASONS][3];
2274 int i;
2275 struct cgraph_node *node;
2277 memset (reason, 0, sizeof (reason));
2278 FOR_EACH_DEFINED_FUNCTION (node)
2280 struct cgraph_edge *e;
2281 for (e = node->callees; e; e = e->next_callee)
2283 if (e->inline_failed)
2285 reason[(int) e->inline_failed][0] += e->count;
2286 reason[(int) e->inline_failed][1] += e->frequency;
2287 reason[(int) e->inline_failed][2] ++;
2288 if (DECL_VIRTUAL_P (e->callee->decl))
2290 if (e->indirect_inlining_edge)
2291 noninlined_virt_indir_cnt += e->count;
2292 else
2293 noninlined_virt_cnt += e->count;
2295 else
2297 if (e->indirect_inlining_edge)
2298 noninlined_indir_cnt += e->count;
2299 else
2300 noninlined_cnt += e->count;
2303 else
2305 if (e->speculative)
2307 if (DECL_VIRTUAL_P (e->callee->decl))
2308 inlined_speculative_ply += e->count;
2309 else
2310 inlined_speculative += e->count;
2312 else if (DECL_VIRTUAL_P (e->callee->decl))
2314 if (e->indirect_inlining_edge)
2315 inlined_virt_indir_cnt += e->count;
2316 else
2317 inlined_virt_cnt += e->count;
2319 else
2321 if (e->indirect_inlining_edge)
2322 inlined_indir_cnt += e->count;
2323 else
2324 inlined_cnt += e->count;
2328 for (e = node->indirect_calls; e; e = e->next_callee)
2329 if (e->indirect_info->polymorphic)
2330 indirect_poly_cnt += e->count;
2331 else
2332 indirect_cnt += e->count;
2334 if (max_count)
2336 fprintf (dump_file,
2337 "Inlined %" PRId64 " + speculative "
2338 "%" PRId64 " + speculative polymorphic "
2339 "%" PRId64 " + previously indirect "
2340 "%" PRId64 " + virtual "
2341 "%" PRId64 " + virtual and previously indirect "
2342 "%" PRId64 "\n" "Not inlined "
2343 "%" PRId64 " + previously indirect "
2344 "%" PRId64 " + virtual "
2345 "%" PRId64 " + virtual and previously indirect "
2346 "%" PRId64 " + stil indirect "
2347 "%" PRId64 " + still indirect polymorphic "
2348 "%" PRId64 "\n", inlined_cnt,
2349 inlined_speculative, inlined_speculative_ply,
2350 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2351 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2352 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2353 fprintf (dump_file,
2354 "Removed speculations %" PRId64 "\n",
2355 spec_rem);
2357 dump_overall_stats ();
2358 fprintf (dump_file, "\nWhy inlining failed?\n");
2359 for (i = 0; i < CIF_N_REASONS; i++)
2360 if (reason[i][2])
2361 fprintf (dump_file, "%-50s: %8i calls, %8i freq, %" PRId64" count\n",
2362 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2363 (int) reason[i][2], (int) reason[i][1], reason[i][0]);
2366 /* Decide on the inlining. We do so in topological order to avoid
2367 the expense of updating data structures. */
2369 static unsigned int
2370 ipa_inline (void)
2372 struct cgraph_node *node;
2373 int nnodes;
2374 struct cgraph_node **order;
2375 int i;
2376 int cold;
2377 bool remove_functions = false;
2379 if (!optimize)
2380 return 0;
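/* Precompute reciprocals used by the inline cost and badness computations
   so that sreal divisions by CGRAPH_FREQ_BASE and by 100 can be done as
   multiplications.  */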
2382 cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
2383 percent_rec = (sreal) 1 / (sreal) 100;
2385 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2387 if (in_lto_p && optimize)
2388 ipa_update_after_lto_read ();
2390 if (dump_file)
2391 dump_inline_summaries (dump_file);
2393 nnodes = ipa_reverse_postorder (order);
2395 FOR_EACH_FUNCTION (node)
2397 node->aux = 0;
2399 /* Recompute the default reasons for inlining because they may have
2400 changed during merging. */
2401 if (in_lto_p)
2403 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2405 gcc_assert (e->inline_failed);
2406 initialize_inline_failed (e);
2408 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2409 initialize_inline_failed (e);
2413 if (dump_file)
2414 fprintf (dump_file, "\nFlattening functions:\n");
2416 /* In the first pass handle functions to be flattened. Do this first,
2417 so that none of our later choices will make flattening impossible. */
2418 for (i = nnodes - 1; i >= 0; i--)
2420 node = order[i];
2422 /* Handle nodes to be flattened.
2423 Ideally, when processing callees, we would stop inlining at the
2424 entry of cycles, possibly cloning that entry point and trying
2425 to flatten it, turning it into a self-recursive
2426 function. */
2427 if (lookup_attribute ("flatten",
2428 DECL_ATTRIBUTES (node->decl)) != NULL)
2430 if (dump_file)
2431 fprintf (dump_file,
2432 "Flattening %s\n", node->name ());
2433 flatten_function (node, false);
2436 if (dump_file)
2437 dump_overall_stats ();
2439 inline_small_functions ();
2441 gcc_assert (symtab->state == IPA_SSA);
2442 symtab->state = IPA_SSA_AFTER_INLINING;
2443 /* Do the first after-inlining removal. We want to remove all "stale"
2444 extern inline functions and virtual functions so we really know what
2445 is called once. */
2446 symtab->remove_unreachable_nodes (dump_file);
2447 free (order);
2449 /* Inline functions with the property that after inlining into all callers
2450 the code size will shrink because the out-of-line copy is eliminated.
2451 We do this regardless of the callee size as long as function growth limits
2452 are met. */
2453 if (dump_file)
2454 fprintf (dump_file,
2455 "\nDeciding on functions to be inlined into all callers and "
2456 "removing useless speculations:\n");
2458 /* Inlining one function called once has a good chance of preventing
2459 inlining of another function into the same callee. Ideally we should
2460 work in priority order, but inlining hot functions first is probably
2461 a good cut without the extra pain of maintaining the queue.
2463 ??? This is not really fitting the bill perfectly: inlining a function
2464 into a callee often leads to better optimization of the callee due to
2465 increased context for optimization.
2466 For example, if main() calls a function that outputs help
2467 and then a function that does the main optimization, we should inline
2468 the second with priority even if both calls are cold by themselves.
2470 We probably want to implement a new predicate replacing our use of
2471 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2472 to be hot. */
2473 for (cold = 0; cold <= 1; cold ++)
2475 FOR_EACH_DEFINED_FUNCTION (node)
2477 struct cgraph_edge *edge, *next;
2478 bool update=false;
2480 for (edge = node->callees; edge; edge = next)
2482 next = edge->next_callee;
2483 if (edge->speculative && !speculation_useful_p (edge, false))
2485 edge->resolve_speculation ();
2486 spec_rem += edge->count;
2487 update = true;
2488 remove_functions = true;
2491 if (update)
2493 struct cgraph_node *where = node->global.inlined_to
2494 ? node->global.inlined_to : node;
2495 reset_edge_caches (where);
2496 inline_update_overall_summary (where);
2498 if (want_inline_function_to_all_callers_p (node, cold))
2500 int num_calls = 0;
2501 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2502 true);
2503 while (node->call_for_symbol_and_aliases
2504 (inline_to_all_callers, &num_calls, true))
2506 remove_functions = true;
2511 /* Free ipa-prop structures if they are no longer needed. */
2512 if (optimize)
2513 ipa_free_all_structures_after_iinln ();
2515 if (dump_file)
2517 fprintf (dump_file,
2518 "\nInlined %i calls, eliminated %i functions\n\n",
2519 ncalls_inlined, nfunctions_inlined);
2520 dump_inline_stats ();
2523 if (dump_file)
2524 dump_inline_summaries (dump_file);
2525 /* In WPA we use inline summaries for the partitioning process. */
2526 if (!flag_wpa)
2527 inline_free_summary ();
2528 return remove_functions ? TODO_remove_functions : 0;
2531 /* Inline always-inline function calls in NODE. */
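/* Callees considered here are those whose declarations disregard inline
   limits, typically functions declared with __attribute__ ((always_inline));
   such calls are inlined even when not optimizing, and a call that cannot
   be inlined is diagnosed later in tree-inline.c.  */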
2533 static bool
2534 inline_always_inline_functions (struct cgraph_node *node)
2536 struct cgraph_edge *e;
2537 bool inlined = false;
2539 for (e = node->callees; e; e = e->next_callee)
2541 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2542 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2543 continue;
2545 if (e->recursive_p ())
2547 if (dump_file)
2548 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2549 e->callee->name ());
2550 e->inline_failed = CIF_RECURSIVE_INLINING;
2551 continue;
2554 if (!can_early_inline_edge_p (e))
2556 /* Set inlined to true if the callee is marked "always_inline" but
2557 is not inlinable. This will allow flagging an error later in
2558 expand_call_inline in tree-inline.c. */
2559 if (lookup_attribute ("always_inline",
2560 DECL_ATTRIBUTES (callee->decl)) != NULL)
2561 inlined = true;
2562 continue;
2565 if (dump_file)
2566 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2567 xstrdup_for_dump (e->callee->name ()),
2568 xstrdup_for_dump (e->caller->name ()));
2569 inline_call (e, true, NULL, NULL, false);
2570 inlined = true;
2572 if (inlined)
2573 inline_update_overall_summary (node);
2575 return inlined;
2578 /* Decide on the inlining. We do so in topological order to avoid
2579 the expense of updating data structures. */
2581 static bool
2582 early_inline_small_functions (struct cgraph_node *node)
2584 struct cgraph_edge *e;
2585 bool inlined = false;
2587 for (e = node->callees; e; e = e->next_callee)
2589 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2590 if (!inline_summaries->get (callee)->inlinable
2591 || !e->inline_failed)
2592 continue;
2594 /* Do not consider functions not declared inline unless
-finline-small-functions or -finline-functions is in effect. */
2595 if (!DECL_DECLARED_INLINE_P (callee->decl)
2596 && !opt_for_fn (node->decl, flag_inline_small_functions)
2597 && !opt_for_fn (node->decl, flag_inline_functions))
2598 continue;
2600 if (dump_file)
2601 fprintf (dump_file, "Considering inline candidate %s.\n",
2602 callee->name ());
2604 if (!can_early_inline_edge_p (e))
2605 continue;
2607 if (e->recursive_p ())
2609 if (dump_file)
2610 fprintf (dump_file, " Not inlining: recursive call.\n");
2611 continue;
2614 if (!want_early_inline_function_p (e))
2615 continue;
2617 if (dump_file)
2618 fprintf (dump_file, " Inlining %s into %s.\n",
2619 xstrdup_for_dump (callee->name ()),
2620 xstrdup_for_dump (e->caller->name ()));
2621 inline_call (e, true, NULL, NULL, false);
2622 inlined = true;
2625 if (inlined)
2626 inline_update_overall_summary (node);
2628 return inlined;
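/* Perform early inlining on the body of FUN: inline always-inline callees
   first, then either flatten FUN when it carries the flatten attribute or
   iterate incremental inlining of small functions, applying the inlined
   bodies through optimize_inline_calls.  Returns TODO flags for the pass
   manager.  */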
2631 unsigned int
2632 early_inliner (function *fun)
2634 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2635 struct cgraph_edge *edge;
2636 unsigned int todo = 0;
2637 int iterations = 0;
2638 bool inlined = false;
2640 if (seen_error ())
2641 return 0;
2643 /* Do nothing if data structures for the ipa-inliner are already computed.
2644 This happens when some pass decides to construct a new function and
2645 cgraph_add_new_function calls lowering passes and early optimization on
2646 it. This may confuse us when the early inliner decides to inline a call
2647 to a function clone, because function clones don't have a parameter list
2648 in ipa-prop matching their signature. */
2649 if (ipa_node_params_sum)
2650 return 0;
2652 if (flag_checking)
2653 node->verify ();
2654 node->remove_all_references ();
2656 /* Rebuild this reference because it doesn't depend on the
2657 function's body and it is required to pass cgraph_node
2658 verification. */
2659 if (node->instrumented_version
2660 && !node->instrumentation_clone)
2661 node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
2663 /* Even when not optimizing or not inlining, inline always-inline
2664 functions. */
2665 inlined = inline_always_inline_functions (node);
2667 if (!optimize
2668 || flag_no_inline
2669 || !flag_early_inlining
2670 /* Never inline regular functions into always-inline functions
2671 during incremental inlining. This is unfortunate, as functions
2672 calling always-inline functions will get less optimized, but at
2673 the same time inlining a function that calls an always-inline
2674 function into an always-inline function might introduce
2675 cycles of edges to be always inlined in the callgraph.
2677 We might want to be smarter and just avoid this type of inlining. */
2678 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2679 && lookup_attribute ("always_inline",
2680 DECL_ATTRIBUTES (node->decl))))
2682 else if (lookup_attribute ("flatten",
2683 DECL_ATTRIBUTES (node->decl)) != NULL)
2685 /* When the function is marked to be flattened, recursively inline
2686 all calls in it. */
2687 if (dump_file)
2688 fprintf (dump_file,
2689 "Flattening %s\n", node->name ());
2690 flatten_function (node, true);
2691 inlined = true;
2693 else
2695 /* If some always_inline functions were inlined, apply the changes.
2696 This way we will not account always-inline functions in the growth limits
2697 and moreover we will inline calls from always-inline functions that we
2698 skipped previously because of the conditional above. */
2699 if (inlined)
2701 timevar_push (TV_INTEGRATION);
2702 todo |= optimize_inline_calls (current_function_decl);
2703 /* The call to optimize_inline_calls above might have introduced new
2704 statements that don't have inline parameters computed. */
2705 for (edge = node->callees; edge; edge = edge->next_callee)
2707 if (inline_edge_summary_vec.length () > (unsigned) edge->uid)
2709 struct inline_edge_summary *es = inline_edge_summary (edge);
2710 es->call_stmt_size
2711 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2712 es->call_stmt_time
2713 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2716 inline_update_overall_summary (node);
2717 inlined = false;
2718 timevar_pop (TV_INTEGRATION);
2720 /* We iterate incremental inlining to get trivial cases of indirect
2721 inlining. */
2722 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2723 && early_inline_small_functions (node))
2725 timevar_push (TV_INTEGRATION);
2726 todo |= optimize_inline_calls (current_function_decl);
2728 /* Technically we ought to recompute inline parameters so the new
2729 iteration of the early inliner works as expected. We, however, have
2730 the values approximately right and thus only need to update the edge
2731 info that might be cleared out for newly discovered edges. */
2732 for (edge = node->callees; edge; edge = edge->next_callee)
2734 /* We have no summary for new bound store calls yet. */
2735 if (inline_edge_summary_vec.length () > (unsigned)edge->uid)
2737 struct inline_edge_summary *es = inline_edge_summary (edge);
2738 es->call_stmt_size
2739 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2740 es->call_stmt_time
2741 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2743 if (edge->callee->decl
2744 && !gimple_check_call_matching_types (
2745 edge->call_stmt, edge->callee->decl, false))
2747 edge->inline_failed = CIF_MISMATCHED_ARGUMENTS;
2748 edge->call_stmt_cannot_inline_p = true;
2751 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2752 inline_update_overall_summary (node);
2753 timevar_pop (TV_INTEGRATION);
2754 iterations++;
2755 inlined = false;
2757 if (dump_file)
2758 fprintf (dump_file, "Iterations: %i\n", iterations);
2761 if (inlined)
2763 timevar_push (TV_INTEGRATION);
2764 todo |= optimize_inline_calls (current_function_decl);
2765 timevar_pop (TV_INTEGRATION);
2768 fun->always_inline_functions_inlined = true;
2770 return todo;
2773 /* Do inlining of small functions. Doing so early helps profiling and other
2774 passes to be somewhat more effective and avoids some code duplication in
2775 the later real inlining pass for testcases with very many function calls. */
2777 namespace {
2779 const pass_data pass_data_early_inline =
2781 GIMPLE_PASS, /* type */
2782 "einline", /* name */
2783 OPTGROUP_INLINE, /* optinfo_flags */
2784 TV_EARLY_INLINING, /* tv_id */
2785 PROP_ssa, /* properties_required */
2786 0, /* properties_provided */
2787 0, /* properties_destroyed */
2788 0, /* todo_flags_start */
2789 0, /* todo_flags_finish */
2792 class pass_early_inline : public gimple_opt_pass
2794 public:
2795 pass_early_inline (gcc::context *ctxt)
2796 : gimple_opt_pass (pass_data_early_inline, ctxt)
2799 /* opt_pass methods: */
2800 virtual unsigned int execute (function *);
2802 }; // class pass_early_inline
2804 unsigned int
2805 pass_early_inline::execute (function *fun)
2807 return early_inliner (fun);
2810 } // anon namespace
2812 gimple_opt_pass *
2813 make_pass_early_inline (gcc::context *ctxt)
2815 return new pass_early_inline (ctxt);
2818 namespace {
2820 const pass_data pass_data_ipa_inline =
2822 IPA_PASS, /* type */
2823 "inline", /* name */
2824 OPTGROUP_INLINE, /* optinfo_flags */
2825 TV_IPA_INLINING, /* tv_id */
2826 0, /* properties_required */
2827 0, /* properties_provided */
2828 0, /* properties_destroyed */
2829 0, /* todo_flags_start */
2830 ( TODO_dump_symtab ), /* todo_flags_finish */
2833 class pass_ipa_inline : public ipa_opt_pass_d
2835 public:
2836 pass_ipa_inline (gcc::context *ctxt)
2837 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2838 inline_generate_summary, /* generate_summary */
2839 inline_write_summary, /* write_summary */
2840 inline_read_summary, /* read_summary */
2841 NULL, /* write_optimization_summary */
2842 NULL, /* read_optimization_summary */
2843 NULL, /* stmt_fixup */
2844 0, /* function_transform_todo_flags_start */
2845 inline_transform, /* function_transform */
2846 NULL) /* variable_transform */
2849 /* opt_pass methods: */
2850 virtual unsigned int execute (function *) { return ipa_inline (); }
2852 }; // class pass_ipa_inline
2854 } // anon namespace
2856 ipa_opt_pass_d *
2857 make_pass_ipa_inline (gcc::context *ctxt)
2859 return new pass_ipa_inline (ctxt);