gcc/ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Inlining decision heuristics
24 The implementation of the inliner is organized as follows:
26 inlining heuristics limits
28 can_inline_edge_p allows checking that a particular inlining is allowed
29 by the limits specified by the user (allowed function growth and so
30 on).
32 Functions are inlined when it is obvious the result is profitable (such
33 as functions called once or when inlining reduces code size).
34 In addition to that we perform inlining of small functions and recursive
35 inlining.
37 inlining heuristics
39 The inliner itself is split into two passes:
41 pass_early_inlining
43 Simple local inlining pass inlining callees into current function.
44 This pass makes no use of whole-unit analysis and thus it can make only
45 very simple decisions based on local properties.
47 The strength of the pass is that it is run in topological order
48 (reverse postorder) on the callgraph. Functions are converted into SSA
49 form just before this pass and optimized subsequently. As a result, the
50 callees of the function seen by the early inliner were already optimized
51 and the results of early inlining add a lot of optimization opportunities
52 for local optimization.
54 The pass handles the obvious inlining decisions within the compilation
55 unit - inlining auto-inline functions, inlining for size and
56 flattening.
58 The main strength of the pass is the ability to eliminate the abstraction
59 penalty in C++ code (via a combination of inlining and early
60 optimization) and thus improve the quality of analysis done by the real IPA
61 optimizers.
63 Because of the lack of whole-unit knowledge, the pass cannot really make
64 good code size/performance tradeoffs. It does, however, do very simple
65 speculative inlining, allowing code size to grow by
66 EARLY_INLINING_INSNS when the callee is a leaf function. In this case the
67 optimizations performed later are very likely to eliminate the cost.
69 pass_ipa_inline
71 This is the real inliner able to handle inlining with whole program
72 knowledge. It performs the following steps:
74 1) inlining of small functions. This is implemented by a greedy
75 algorithm ordering all inlinable cgraph edges by their badness and
76 inlining them in this order as long as the inline limits allow doing so.
78 This heuristic is not very good at inlining recursive calls. Recursive
79 calls can be inlined with results similar to loop unrolling. To do so,
80 a special purpose recursive inliner is executed on the function when a
81 recursive edge is met as a viable candidate.
83 2) Unreachable functions are removed from the callgraph. Inlining leads
84 to devirtualization and other modifications of the callgraph, so functions
85 may become unreachable during the process. Also functions declared as
86 extern inline or virtual functions are removed, since after inlining
87 we no longer need the offline bodies.
89 3) Functions called once and not exported from the unit are inlined.
90 This should almost always lead to a reduction of code size by eliminating
91 the need for the offline copy of the function. */
93 #include "config.h"
94 #include "system.h"
95 #include "coretypes.h"
96 #include "tm.h"
97 #include "tree.h"
98 #include "tree-inline.h"
99 #include "langhooks.h"
100 #include "flags.h"
101 #include "cgraph.h"
102 #include "diagnostic.h"
103 #include "gimple-pretty-print.h"
104 #include "timevar.h"
105 #include "params.h"
106 #include "fibheap.h"
107 #include "intl.h"
108 #include "tree-pass.h"
109 #include "coverage.h"
110 #include "ggc.h"
111 #include "rtl.h"
112 #include "tree-flow.h"
113 #include "ipa-prop.h"
114 #include "except.h"
115 #include "target.h"
116 #include "ipa-inline.h"
117 #include "ipa-utils.h"
119 /* Statistics we collect about inlining algorithm. */
120 static int overall_size;
121 static gcov_type max_count;
123 /* Return false when inlining edge E would lead to violating
124 limits on function unit growth or stack usage growth.
126 The relative function body growth limit is present generally
127 to avoid problems with non-linear behavior of the compiler.
128 To allow inlining huge functions into a tiny wrapper, the limit
129 is always based on the bigger of the two functions considered.
131 For stack growth limits we always base the limit on the stack usage
132 of the callers. We want to prevent applications from segfaulting
133 on stack overflow when functions with huge stack frames get
134 inlined. */
136 static bool
137 caller_growth_limits (struct cgraph_edge *e)
139 struct cgraph_node *to = e->caller;
140 struct cgraph_node *what = cgraph_function_or_thunk_node (e->callee, NULL);
141 int newsize;
142 int limit = 0;
143 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
144 struct inline_summary *info, *what_info, *outer_info = inline_summary (to);
146 /* Look for the function e->caller is inlined into. While doing
147 so, work out the largest function body on the way. As
148 described above, we want to base our function growth
149 limits on that - not on the self size of the
150 outer function, nor on the self size of the inline code
151 we immediately inline into. This is the most relaxed
152 interpretation of the rule "do not grow large functions
153 too much in order to prevent the compiler from exploding". */
154 while (true)
156 info = inline_summary (to);
157 if (limit < info->self_size)
158 limit = info->self_size;
159 if (stack_size_limit < info->estimated_self_stack_size)
160 stack_size_limit = info->estimated_self_stack_size;
161 if (to->global.inlined_to)
162 to = to->callers->caller;
163 else
164 break;
167 what_info = inline_summary (what);
169 if (limit < what_info->self_size)
170 limit = what_info->self_size;
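/* Illustrative example (hypothetical numbers): if the larger of the two
   bodies is 5000 insns and --param large-function-growth is 100, the line
   below raises the limit to 5000 + 5000 * 100 / 100 == 10000 insns,
   i.e. the combined body may at most double. */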
172 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
174 /* Check the size after inlining against the function limits. But allow
175 the function to shrink if it went over the limits by forced inlining. */
176 newsize = estimate_size_after_inlining (to, e);
177 if (newsize >= info->size
178 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
179 && newsize > limit)
181 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
182 return false;
185 if (!what_info->estimated_stack_size)
186 return true;
188 /* FIXME: The stack size limit often prevents inlining in Fortran programs
189 due to large I/O data structures used by the Fortran front-end.
190 We ought to ignore this limit when we know that the edge is executed
191 on every invocation of the caller (i.e. its call statement dominates the
192 exit block). We do not track this information yet. */
193 stack_size_limit += ((gcov_type)stack_size_limit
194 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
196 inlined_stack = (outer_info->stack_frame_offset
197 + outer_info->estimated_self_stack_size
198 + what_info->estimated_stack_size);
199 /* Check new stack consumption with stack consumption at the place
200 stack is used. */
201 if (inlined_stack > stack_size_limit
202 /* If the function already has large stack usage from a sibling
203 inline call, we can inline too.
204 This bit overoptimistically assumes that we are good at stack
205 packing. */
206 && inlined_stack > info->estimated_stack_size
207 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
209 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
210 return false;
212 return true;
215 /* Dump info about why inlining has failed. */
217 static void
218 report_inline_failed_reason (struct cgraph_edge *e)
220 if (dump_file)
222 fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
223 cgraph_node_name (e->caller), e->caller->uid,
224 cgraph_node_name (e->callee), e->callee->uid,
225 cgraph_inline_failed_string (e->inline_failed));
229 /* Decide if we can inline the edge and possibly update
230 the inline_failed reason.
231 We check whether inlining is possible at all and whether
232 caller growth limits allow doing so.
234 If REPORT is true, output the reason to the dump file. */
236 static bool
237 can_inline_edge_p (struct cgraph_edge *e, bool report)
239 bool inlinable = true;
240 enum availability avail;
241 struct cgraph_node *callee
242 = cgraph_function_or_thunk_node (e->callee, &avail);
243 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
244 tree callee_tree
245 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
246 struct function *caller_cfun = DECL_STRUCT_FUNCTION (e->caller->decl);
247 struct function *callee_cfun
248 = callee ? DECL_STRUCT_FUNCTION (callee->decl) : NULL;
250 if (!caller_cfun && e->caller->clone_of)
251 caller_cfun = DECL_STRUCT_FUNCTION (e->caller->clone_of->decl);
253 if (!callee_cfun && callee && callee->clone_of)
254 callee_cfun = DECL_STRUCT_FUNCTION (callee->clone_of->decl);
256 gcc_assert (e->inline_failed);
258 if (!callee || !callee->analyzed)
260 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
261 inlinable = false;
263 else if (!inline_summary (callee)->inlinable)
265 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
266 inlinable = false;
268 else if (avail <= AVAIL_OVERWRITABLE)
270 e->inline_failed = CIF_OVERWRITABLE;
271 return false;
273 else if (e->call_stmt_cannot_inline_p)
275 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
276 inlinable = false;
278 /* Don't inline if the functions have different EH personalities. */
279 else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
280 && DECL_FUNCTION_PERSONALITY (callee->decl)
281 && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
282 != DECL_FUNCTION_PERSONALITY (callee->decl)))
284 e->inline_failed = CIF_EH_PERSONALITY;
285 inlinable = false;
287 /* Don't inline if the callee can throw non-call exceptions but the
288 caller cannot.
289 FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
290 Move the flag into cgraph node or mirror it in the inline summary. */
291 else if (callee_cfun && callee_cfun->can_throw_non_call_exceptions
292 && !(caller_cfun && caller_cfun->can_throw_non_call_exceptions))
294 e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
295 inlinable = false;
297 /* Check compatibility of target optimization options. */
298 else if (!targetm.target_option.can_inline_p (e->caller->decl,
299 callee->decl))
301 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
302 inlinable = false;
304 /* Check if caller growth allows the inlining. */
305 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
306 && !lookup_attribute ("flatten",
307 DECL_ATTRIBUTES
308 (e->caller->global.inlined_to
309 ? e->caller->global.inlined_to->decl
310 : e->caller->decl))
311 && !caller_growth_limits (e))
312 inlinable = false;
313 /* Don't inline a function with a higher optimization level than the
314 caller. FIXME: this is really just tip of iceberg of handling
315 optimization attribute. */
316 else if (caller_tree != callee_tree)
318 struct cl_optimization *caller_opt
319 = TREE_OPTIMIZATION ((caller_tree)
320 ? caller_tree
321 : optimization_default_node);
323 struct cl_optimization *callee_opt
324 = TREE_OPTIMIZATION ((callee_tree)
325 ? callee_tree
326 : optimization_default_node);
328 if (((caller_opt->x_optimize > callee_opt->x_optimize)
329 || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
330 /* gcc.dg/pr43564.c. Look at forced inline even in -O0. */
331 && !DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
333 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
334 inlinable = false;
338 /* Be sure that the cannot_inline_p flag is up to date. */
339 gcc_checking_assert (!e->call_stmt
340 || (gimple_call_cannot_inline_p (e->call_stmt)
341 == e->call_stmt_cannot_inline_p)
342 /* In -flto-partition=none mode we really keep things out of
343 sync because call_stmt_cannot_inline_p is set at cgraph
344 merging when function bodies are not there yet. */
345 || (in_lto_p && !gimple_call_cannot_inline_p (e->call_stmt)));
346 if (!inlinable && report)
347 report_inline_failed_reason (e);
348 return inlinable;
352 /* Return true if the edge E is inlinable during early inlining. */
354 static bool
355 can_early_inline_edge_p (struct cgraph_edge *e)
357 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
358 NULL);
359 /* The early inliner might get called at WPA stage when an IPA pass adds a new
360 function. In this case we cannot really do any early inlining
361 because function bodies are missing. */
362 if (!gimple_has_body_p (callee->decl))
364 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
365 return false;
367 /* In the early inliner some of the callees may not be in SSA form yet
368 (i.e. the callgraph is cyclic and we have not processed
369 the callee with the early inliner yet). We don't have a CIF code for this
370 case; later we will re-do the decision in the real inliner. */
371 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
372 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
374 if (dump_file)
375 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
376 return false;
378 if (!can_inline_edge_p (e, true))
379 return false;
380 return true;
384 /* Return true when N is a leaf function. Accept cheap builtins
385 in leaf functions. */
387 static bool
388 leaf_node_p (struct cgraph_node *n)
390 struct cgraph_edge *e;
391 for (e = n->callees; e; e = e->next_callee)
392 if (!is_inexpensive_builtin (e->callee->decl))
393 return false;
394 return true;
398 /* Return true if we are interested in inlining the small function early. */
400 static bool
401 want_early_inline_function_p (struct cgraph_edge *e)
403 bool want_inline = true;
404 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
406 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
408 else if (!DECL_DECLARED_INLINE_P (callee->decl)
409 && !flag_inline_small_functions)
411 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
412 report_inline_failed_reason (e);
413 want_inline = false;
415 else
417 int growth = estimate_edge_growth (e);
418 if (growth <= 0)
420 else if (!cgraph_maybe_hot_edge_p (e)
421 && growth > 0)
423 if (dump_file)
424 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
425 "call is cold and code would grow by %i\n",
426 cgraph_node_name (e->caller), e->caller->uid,
427 cgraph_node_name (callee), callee->uid,
428 growth);
429 want_inline = false;
431 else if (!leaf_node_p (callee)
432 && growth > 0)
434 if (dump_file)
435 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
436 "callee is not leaf and code would grow by %i\n",
437 cgraph_node_name (e->caller), e->caller->uid,
438 cgraph_node_name (callee), callee->uid,
439 growth);
440 want_inline = false;
442 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
444 if (dump_file)
445 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
446 "growth %i exceeds --param early-inlining-insns\n",
447 cgraph_node_name (e->caller), e->caller->uid,
448 cgraph_node_name (callee), callee->uid,
449 growth);
450 want_inline = false;
453 return want_inline;
456 /* Return true if we are interested in inlining the small function.
457 When REPORT is true, report the reason to the dump file. */
459 static bool
460 want_inline_small_function_p (struct cgraph_edge *e, bool report)
462 bool want_inline = true;
463 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
465 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
467 else if (!DECL_DECLARED_INLINE_P (callee->decl)
468 && !flag_inline_small_functions)
470 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
471 want_inline = false;
473 else
475 int growth = estimate_edge_growth (e);
477 if (growth <= 0)
479 else if (DECL_DECLARED_INLINE_P (callee->decl)
480 && growth >= MAX_INLINE_INSNS_SINGLE)
482 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
483 want_inline = false;
485 else if (!DECL_DECLARED_INLINE_P (callee->decl)
486 && !flag_inline_functions)
488 e->inline_failed = CIF_NOT_DECLARED_INLINED;
489 want_inline = false;
491 else if (!DECL_DECLARED_INLINE_P (callee->decl)
492 && growth >= MAX_INLINE_INSNS_AUTO)
494 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
495 want_inline = false;
497 /* If the call is cold, do not inline when the function body would grow.
498 Still inline when the overall unit size will shrink because the offline
499 copy of the function is being eliminated.
501 This is slightly wrong on the aggressive side: it is entirely possible
502 that the function is called many times with a context where inlining
503 reduces code size and a few times with a context where inlining increases
504 code size. The resulting growth estimate will be negative even if it
505 would make more sense to keep the offline copy and not inline into the
506 call sites that make the code size grow.
508 When badness orders the calls in a way that code reducing calls come
509 first, this situation is not a problem at all: after inlining all
510 "good" calls, we will realize that keeping the function around is
511 better. */
512 else if (!cgraph_maybe_hot_edge_p (e)
513 && (DECL_EXTERNAL (callee->decl)
515 /* Unlike for functions called once, we play it unsafe with
516 COMDATs. We can allow that since we know the functions
517 in consideration are small (and thus the risk is small) and
518 moreover the growth estimates already account for the fact that COMDAT
519 functions may or may not disappear when eliminated from the
520 current unit. With good probability, making the aggressive
521 choice in all units is going to make the overall program
522 smaller.
524 Consequently we ask cgraph_can_remove_if_no_direct_calls_p
525 instead of
526 cgraph_will_be_removed_from_program_if_no_direct_calls */
528 || !cgraph_can_remove_if_no_direct_calls_p (callee)
529 || estimate_growth (callee) > 0))
531 e->inline_failed = CIF_UNLIKELY_CALL;
532 want_inline = false;
535 if (!want_inline && report)
536 report_inline_failed_reason (e);
537 return want_inline;
540 /* EDGE is a self-recursive edge.
541 We handle two cases - when function A is inlined into itself
542 or when function A is being inlined into another inlined copy of function
543 A within function B.
545 In the first case OUTER_NODE points to the toplevel copy of A, while
546 in the second case OUTER_NODE points to the outermost copy of A in B.
548 In both cases we want to be extra selective since
549 inlining the call will just cause new recursive calls to appear. */
551 static bool
552 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
553 struct cgraph_node *outer_node,
554 bool peeling,
555 int depth)
557 char const *reason = NULL;
558 bool want_inline = true;
559 int caller_freq = CGRAPH_FREQ_BASE;
560 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
562 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
563 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
565 if (!cgraph_maybe_hot_edge_p (edge))
567 reason = "recursive call is cold";
568 want_inline = false;
570 else if (max_count && !outer_node->count)
572 reason = "not executed in profile";
573 want_inline = false;
575 else if (depth > max_depth)
577 reason = "--param max-inline-recursive-depth exceeded.";
578 want_inline = false;
581 if (outer_node->global.inlined_to)
582 caller_freq = outer_node->callers->frequency;
584 if (!want_inline)
586 /* Inlining of a self-recursive function into a copy of itself within another function
587 is a transformation similar to loop peeling.
589 Peeling is profitable if we can inline enough copies to make the probability
590 of an actual call to the self-recursive function very small. Be sure that
591 the probability of recursion is small.
593 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
594 This way the expected number of recursions is at most max_depth. */
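/* Illustrative example: with max_depth of 8 and CGRAPH_FREQ_BASE of 1000
   (values assumed only for this example), max_prob below becomes
   1000 - (1000 + 7) / 8 == 875, i.e. the recursion probability at the
   first level must stay below 87.5%; each additional level of depth
   tightens the bound further by squaring it relative to
   CGRAPH_FREQ_BASE. */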
595 else if (peeling)
597 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
598 / max_depth);
599 int i;
600 for (i = 1; i < depth; i++)
601 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
602 if (max_count
603 && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
604 >= max_prob))
606 reason = "profile of recursive call is too large";
607 want_inline = false;
609 if (!max_count
610 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
611 >= max_prob))
613 reason = "frequency of recursive call is too large";
614 want_inline = false;
617 /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the recursion
618 depth is large. We reduce function call overhead and increase the chances that
619 things fit in the hardware return predictor.
621 Recursive inlining might however increase the cost of stack frame setup,
622 actually slowing down functions whose recursion tree is wide rather than
623 deep.
625 Deciding reliably on when to do recursive inlining without profile feedback
626 is tricky. For now we disable recursive inlining when the probability of self
627 recursion is low.
629 Recursive inlining of a self-recursive call within a loop also results in large loop
630 depths that generally optimize badly. We may want to throttle down inlining
631 in those cases. In particular this seems to happen in one of the libstdc++ rb tree
632 methods. */
633 else
635 if (max_count
636 && (edge->count * 100 / outer_node->count
637 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
639 reason = "profile of recursive call is too small";
640 want_inline = false;
642 else if (!max_count
643 && (edge->frequency * 100 / caller_freq
644 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
646 reason = "frequency of recursive call is too small";
647 want_inline = false;
650 if (!want_inline && dump_file)
651 fprintf (dump_file, " not inlining recursively: %s\n", reason);
652 return want_inline;
655 /* Return true when NODE has a caller other than EDGE.
656 Worker for cgraph_for_node_and_aliases. */
658 static bool
659 check_caller_edge (struct cgraph_node *node, void *edge)
661 return (node->callers
662 && node->callers != edge);
666 /* Decide whether NODE is called once and inlining it would eliminate the need
667 for the offline copy of the function. */
669 static bool
670 want_inline_function_called_once_p (struct cgraph_node *node)
672 struct cgraph_node *function = cgraph_function_or_thunk_node (node, NULL);
673 /* Already inlined? */
674 if (function->global.inlined_to)
675 return false;
676 /* Zero or more than one caller? */
677 if (!node->callers
678 || node->callers->next_caller)
679 return false;
680 /* Maybe other aliases have more direct calls. */
681 if (cgraph_for_node_and_aliases (node, check_caller_edge, node->callers, true))
682 return false;
683 /* Recursive call makes no sense to inline. */
684 if (cgraph_edge_recursive_p (node->callers))
685 return false;
686 /* External functions are not really in the unit, so inlining
687 them when called once would just increase the program size. */
688 if (DECL_EXTERNAL (function->decl))
689 return false;
690 /* Offline body must be optimized out. */
691 if (!cgraph_will_be_removed_from_program_if_no_direct_calls (function))
692 return false;
693 if (!can_inline_edge_p (node->callers, true))
694 return false;
695 return true;
699 /* Return relative time improvement for inlining EDGE in range
700 1...2^9. */
702 static inline int
703 relative_time_benefit (struct inline_summary *callee_info,
704 struct cgraph_edge *edge,
705 int time_growth)
707 int relbenefit;
708 gcov_type uninlined_call_time;
710 uninlined_call_time =
711 ((gcov_type)
712 (callee_info->time
713 + inline_edge_summary (edge)->call_stmt_time) * edge->frequency
714 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
715 /* Compute the relative time benefit, i.e. how much the call becomes faster.
716 ??? Perhaps computing how much the caller+callee together become faster
717 would lead to more realistic results. */
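/* Illustrative example (hypothetical numbers): a callee time of 100, a
   call statement time of 5 and a frequency equal to CGRAPH_FREQ_BASE give
   an uninlined_call_time of 105; a time_growth of 53 then yields a
   relbenefit of (105 - 53) * 256 / 105 == 126. */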
718 if (!uninlined_call_time)
719 uninlined_call_time = 1;
720 relbenefit =
721 (uninlined_call_time - time_growth) * 256 / (uninlined_call_time);
722 relbenefit = MIN (relbenefit, 512);
723 relbenefit = MAX (relbenefit, 1);
724 return relbenefit;
728 /* A cost model driving the inlining heuristics in a way such that the edges with
729 the smallest badness are inlined first. After each inlining is performed
730 the costs of all caller edges of the affected nodes are recomputed so the
731 metrics may accurately depend on values such as the number of inlinable callers
732 of the function or the function body size. */
734 static int
735 edge_badness (struct cgraph_edge *edge, bool dump)
737 gcov_type badness;
738 int growth, time_growth;
739 struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee,
740 NULL);
741 struct inline_summary *callee_info = inline_summary (callee);
743 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
744 return INT_MIN;
746 growth = estimate_edge_growth (edge);
747 time_growth = estimate_edge_time (edge);
749 if (dump)
751 fprintf (dump_file, " Badness calculation for %s -> %s\n",
752 cgraph_node_name (edge->caller),
753 cgraph_node_name (callee));
754 fprintf (dump_file, " size growth %i, time growth %i\n",
755 growth,
756 time_growth);
759 /* Always prefer inlining saving code size. */
760 if (growth <= 0)
762 badness = INT_MIN / 2 + growth;
763 if (dump)
764 fprintf (dump_file, " %i: Growth %i <= 0\n", (int) badness,
765 growth);
768 /* When profiling is available, compute badness as:
770 relative_edge_count * relative_time_benefit
771 goodness = -------------------------------------------
772 edge_growth
773 badness = -goodness
775 The fraction is upside down, because the bounds on edge counts and time
776 benefits are known, while edge growth is essentially unlimited. */
778 else if (max_count)
780 int relbenefit = relative_time_benefit (callee_info, edge, time_growth);
781 badness =
782 ((int)
783 ((double) edge->count * INT_MIN / 2 / max_count / 512) *
784 relative_time_benefit (callee_info, edge, time_growth)) / growth;
786 /* Be sure that insanity of the profile won't lead to increasing counts
787 in the scaling and thus to overflow in the computation above. */
788 gcc_assert (max_count >= edge->count);
789 if (dump)
791 fprintf (dump_file,
792 " %i (relative %f): profile info. Relative count %f"
793 " * Relative benefit %f\n",
794 (int) badness, (double) badness / INT_MIN,
795 (double) edge->count / max_count,
796 relbenefit * 100 / 256.0);
800 /* When a function-local profile is available, compute badness as:
803 growth_of_callee
804 badness = -------------------------------------- + growth_for_all
805 relative_time_benefit * edge_frequency */
808 else if (flag_guess_branch_prob)
810 int div = edge->frequency * (1<<10) / CGRAPH_FREQ_MAX;
811 int growth_for_all;
813 div = MAX (div, 1);
814 gcc_checking_assert (edge->frequency <= CGRAPH_FREQ_MAX);
815 div *= relative_time_benefit (callee_info, edge, time_growth);
817 /* frequency is normalized in range 1...2^10.
818 relbenefit in range 1...2^9
819 DIV should be in range 1....2^19. */
820 gcc_checking_assert (div >= 1 && div <= (1<<19));
822 /* The result must be an integer in range 0...INT_MAX.
823 Set the base of the fixed point calculation so we don't lose much
824 precision for small badnesses (those are interesting) yet we don't
825 overflow for growths that are still in the interesting range. */
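/* Illustrative example (hypothetical numbers): for a maximally hot call
   (frequency equal to CGRAPH_FREQ_MAX) with a time_growth of 0 (relative
   benefit of 256), div is 1024 * 256 == 262144, so a size growth of 10
   gives a badness of about 10 * (1 << 18) / 262144 == 10 before the
   overall-growth term below is added. */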
826 badness = ((gcov_type)growth) * (1<<18);
827 badness = (badness + div / 2) / div;
829 /* The overall growth of inlining all calls of the function matters: we want to
830 inline so the offline copy of the function is no longer needed.
832 Additionally, functions that can be fully inlined without much
833 effort are better inline candidates than functions that can be fully
834 inlined only after noticeable overall unit growth. The latter
835 are better in the sense of compressing code size by factoring out common
836 code into a separate function shared by multiple code paths.
838 We might mix this value into the fraction by taking into account the
839 relative growth of the unit, but for now just add the number
840 into the resulting fraction. */
841 growth_for_all = estimate_growth (callee);
842 badness += growth_for_all;
843 if (badness > INT_MAX - 1)
844 badness = INT_MAX - 1;
845 if (dump)
847 fprintf (dump_file,
848 " %i: guessed profile. frequency %f, overall growth %i,"
849 " benefit %f%%, divisor %i\n",
850 (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE, growth_for_all,
851 relative_time_benefit (callee_info, edge, time_growth) * 100 / 256.0, div);
854 /* When a function-local profile is not available or it does not give
855 useful information (i.e. frequency is zero), base the cost on the
856 loop nest and overall size growth, so we optimize for the overall number
857 of functions fully inlined in the program. */
858 else
860 int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
861 badness = estimate_growth (callee) * 256;
863 /* Decrease badness if call is nested. */
864 if (badness > 0)
865 badness >>= nest;
866 else
868 badness <<= nest;
870 if (dump)
871 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
872 nest);
875 /* Ensure that we did not overflow in all the fixed point math above. */
876 gcc_assert (badness >= INT_MIN);
877 gcc_assert (badness <= INT_MAX - 1);
878 /* Make recursive inlining always happen after other inlining is done. */
879 if (cgraph_edge_recursive_p (edge))
880 return badness + 1;
881 else
882 return badness;
885 /* Recompute badness of EDGE and update its key in HEAP if needed. */
886 static inline void
887 update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
889 int badness = edge_badness (edge, false);
890 if (edge->aux)
892 fibnode_t n = (fibnode_t) edge->aux;
893 gcc_checking_assert (n->data == edge);
895 /* fibheap_replace_key only decreases the keys.
896 When we increase the key we do not update the heap
897 and instead re-insert the element once it becomes
898 the minimum of the heap. */
899 if (badness < n->key)
901 if (dump_file && (dump_flags & TDF_DETAILS))
903 fprintf (dump_file,
904 " decreasing badness %s/%i -> %s/%i, %i to %i\n",
905 cgraph_node_name (edge->caller), edge->caller->uid,
906 cgraph_node_name (edge->callee), edge->callee->uid,
907 (int)n->key,
908 badness);
910 fibheap_replace_key (heap, n, badness);
911 gcc_checking_assert (n->key == badness);
914 else
916 if (dump_file && (dump_flags & TDF_DETAILS))
918 fprintf (dump_file,
919 " enqueuing call %s/%i -> %s/%i, badness %i\n",
920 cgraph_node_name (edge->caller), edge->caller->uid,
921 cgraph_node_name (edge->callee), edge->callee->uid,
922 badness);
924 edge->aux = fibheap_insert (heap, badness, edge);
929 /* NODE was inlined.
930 All caller edges need to be reset because
931 the size estimates changed. Similarly callees need to be reset
932 because a better context may be known. */
934 static void
935 reset_edge_caches (struct cgraph_node *node)
937 struct cgraph_edge *edge;
938 struct cgraph_edge *e = node->callees;
939 struct cgraph_node *where = node;
940 int i;
941 struct ipa_ref *ref;
943 if (where->global.inlined_to)
944 where = where->global.inlined_to;
946 /* WHERE's body size has changed, so the cached growth is invalid. */
947 reset_node_growth_cache (where);
949 for (edge = where->callers; edge; edge = edge->next_caller)
950 if (edge->inline_failed)
951 reset_edge_growth_cache (edge);
952 for (i = 0; ipa_ref_list_refering_iterate (&where->ref_list, i, ref); i++)
953 if (ref->use == IPA_REF_ALIAS)
954 reset_edge_caches (ipa_ref_refering_node (ref));
956 if (!e)
957 return;
959 while (true)
960 if (!e->inline_failed && e->callee->callees)
961 e = e->callee->callees;
962 else
964 if (e->inline_failed)
965 reset_edge_growth_cache (e);
966 if (e->next_callee)
967 e = e->next_callee;
968 else
972 if (e->caller == node)
973 return;
974 e = e->caller->callers;
976 while (!e->next_callee);
977 e = e->next_callee;
982 /* Recompute HEAP nodes for each caller of NODE.
983 UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
984 When CHECK_INLINABLITY_FOR is set, re-check only for the specified edge
985 whether it is inlinable. Otherwise check all edges. */
987 static void
988 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
989 bitmap updated_nodes,
990 struct cgraph_edge *check_inlinablity_for)
992 struct cgraph_edge *edge;
993 int i;
994 struct ipa_ref *ref;
996 if ((!node->alias && !inline_summary (node)->inlinable)
997 || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
998 || node->global.inlined_to)
999 return;
1000 if (!bitmap_set_bit (updated_nodes, node->uid))
1001 return;
1003 for (i = 0; ipa_ref_list_refering_iterate (&node->ref_list, i, ref); i++)
1004 if (ref->use == IPA_REF_ALIAS)
1006 struct cgraph_node *alias = ipa_ref_refering_node (ref);
1007 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1010 for (edge = node->callers; edge; edge = edge->next_caller)
1011 if (edge->inline_failed)
1013 if (!check_inlinablity_for
1014 || check_inlinablity_for == edge)
1016 if (can_inline_edge_p (edge, false)
1017 && want_inline_small_function_p (edge, false))
1018 update_edge_key (heap, edge);
1019 else if (edge->aux)
1021 report_inline_failed_reason (edge);
1022 fibheap_delete_node (heap, (fibnode_t) edge->aux);
1023 edge->aux = NULL;
1026 else if (edge->aux)
1027 update_edge_key (heap, edge);
1031 /* Recompute HEAP nodes for each uninlined call in NODE.
1032 This is used when we know that edge badnesses are only going to increase
1033 (we introduced a new call site) and thus all we need is to insert newly
1034 created edges into the heap. */
1036 static void
1037 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
1038 bitmap updated_nodes)
1040 struct cgraph_edge *e = node->callees;
1042 if (!e)
1043 return;
1044 while (true)
1045 if (!e->inline_failed && e->callee->callees)
1046 e = e->callee->callees;
1047 else
1049 enum availability avail;
1050 struct cgraph_node *callee;
1051 /* We do not reset the callee growth cache here. Since we added a new call,
1052 growth could only have increased and consequently the badness metric
1053 does not need updating. */
1054 if (e->inline_failed
1055 && (callee = cgraph_function_or_thunk_node (e->callee, &avail))
1056 && inline_summary (callee)->inlinable
1057 && cgraph_function_body_availability (callee) >= AVAIL_AVAILABLE
1058 && !bitmap_bit_p (updated_nodes, callee->uid))
1060 if (can_inline_edge_p (e, false)
1061 && want_inline_small_function_p (e, false))
1062 update_edge_key (heap, e);
1063 else if (e->aux)
1065 report_inline_failed_reason (e);
1066 fibheap_delete_node (heap, (fibnode_t) e->aux);
1067 e->aux = NULL;
1070 if (e->next_callee)
1071 e = e->next_callee;
1072 else
1076 if (e->caller == node)
1077 return;
1078 e = e->caller->callers;
1080 while (!e->next_callee);
1081 e = e->next_callee;
1086 /* Recompute heap nodes for the caller edges of each of the callees.
1087 Walk recursively into all inline clones. */
1089 static void
1090 update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
1091 bitmap updated_nodes)
1093 struct cgraph_edge *e = node->callees;
1094 if (!e)
1095 return;
1096 while (true)
1097 if (!e->inline_failed && e->callee->callees)
1098 e = e->callee->callees;
1099 else
1101 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
1102 NULL);
1104 /* We inlined and thus callees might have a different number of calls.
1105 Reset their caches. */
1106 reset_node_growth_cache (callee);
1107 if (e->inline_failed)
1108 update_caller_keys (heap, callee, updated_nodes, e);
1109 if (e->next_callee)
1110 e = e->next_callee;
1111 else
1115 if (e->caller == node)
1116 return;
1117 e = e->caller->callers;
1119 while (!e->next_callee);
1120 e = e->next_callee;
1125 /* Enqueue all recursive calls from NODE into priority queue depending on
1126 how likely we want to recursively inline the call. */
1128 static void
1129 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1130 fibheap_t heap)
1132 struct cgraph_edge *e;
1133 enum availability avail;
1135 for (e = where->callees; e; e = e->next_callee)
1136 if (e->callee == node
1137 || (cgraph_function_or_thunk_node (e->callee, &avail) == node
1138 && avail > AVAIL_OVERWRITABLE))
1140 /* When profile feedback is available, prioritize by expected number
1141 of calls. */
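/* Note: the count is divided by ceil (max_count / 2^24) below, so even
   very large profile counts map into roughly the 0...2^24 range and the
   negated value comfortably fits the heap's integer key. */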
1142 fibheap_insert (heap,
1143 !max_count ? -e->frequency
1144 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
1147 for (e = where->callees; e; e = e->next_callee)
1148 if (!e->inline_failed)
1149 lookup_recursive_calls (node, e->callee, heap);
1152 /* Decide on recursive inlining: in the case the function has recursive calls,
1153 inline until the body size reaches the given limit. If any new indirect edges
1154 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1155 is NULL. */
1157 static bool
1158 recursive_inlining (struct cgraph_edge *edge,
1159 VEC (cgraph_edge_p, heap) **new_edges)
1161 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1162 fibheap_t heap;
1163 struct cgraph_node *node;
1164 struct cgraph_edge *e;
1165 struct cgraph_node *master_clone = NULL, *next;
1166 int depth = 0;
1167 int n = 0;
1169 node = edge->caller;
1170 if (node->global.inlined_to)
1171 node = node->global.inlined_to;
1173 if (DECL_DECLARED_INLINE_P (node->decl))
1174 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1176 /* Make sure that function is small enough to be considered for inlining. */
1177 if (estimate_size_after_inlining (node, edge) >= limit)
1178 return false;
1179 heap = fibheap_new ();
1180 lookup_recursive_calls (node, node, heap);
1181 if (fibheap_empty (heap))
1183 fibheap_delete (heap);
1184 return false;
1187 if (dump_file)
1188 fprintf (dump_file,
1189 " Performing recursive inlining on %s\n",
1190 cgraph_node_name (node));
1192 /* Do the inlining and update the list of recursive calls during the process. */
1193 while (!fibheap_empty (heap))
1195 struct cgraph_edge *curr
1196 = (struct cgraph_edge *) fibheap_extract_min (heap);
1197 struct cgraph_node *cnode;
1199 if (estimate_size_after_inlining (node, curr) > limit)
1200 break;
1202 if (!can_inline_edge_p (curr, true))
1203 continue;
1205 depth = 1;
1206 for (cnode = curr->caller;
1207 cnode->global.inlined_to; cnode = cnode->callers->caller)
1208 if (node->decl
1209 == cgraph_function_or_thunk_node (curr->callee, NULL)->decl)
1210 depth++;
1212 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1213 continue;
1215 if (dump_file)
1217 fprintf (dump_file,
1218 " Inlining call of depth %i", depth);
1219 if (node->count)
1221 fprintf (dump_file, " called approx. %.2f times per call",
1222 (double)curr->count / node->count);
1224 fprintf (dump_file, "\n");
1226 if (!master_clone)
1228 /* We need original clone to copy around. */
1229 master_clone = cgraph_clone_node (node, node->decl,
1230 node->count, CGRAPH_FREQ_BASE,
1231 false, NULL, true);
1232 for (e = master_clone->callees; e; e = e->next_callee)
1233 if (!e->inline_failed)
1234 clone_inlined_nodes (e, true, false, NULL);
1237 cgraph_redirect_edge_callee (curr, master_clone);
1238 inline_call (curr, false, new_edges, &overall_size);
1239 lookup_recursive_calls (node, curr->callee, heap);
1240 n++;
1243 if (!fibheap_empty (heap) && dump_file)
1244 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1245 fibheap_delete (heap);
1247 if (!master_clone)
1248 return false;
1250 if (dump_file)
1251 fprintf (dump_file,
1252 "\n Inlined %i times, "
1253 "body grown from size %i to %i, time %i to %i\n", n,
1254 inline_summary (master_clone)->size, inline_summary (node)->size,
1255 inline_summary (master_clone)->time, inline_summary (node)->time);
1257 /* Remove the master clone we used for inlining. We rely on the fact that
1258 clones inlined into the master clone get queued just before the master clone,
1259 so we don't need recursion. */
1260 for (node = cgraph_nodes; node != master_clone;
1261 node = next)
1263 next = node->next;
1264 if (node->global.inlined_to == master_clone)
1265 cgraph_remove_node (node);
1267 cgraph_remove_node (master_clone);
1268 return true;
1272 /* Given the whole-compilation-unit estimate of INSNS, compute how large we can
1273 allow the unit to grow. */
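/* Illustrative example (hypothetical numbers): if the unit already has
   10000 insns (at least PARAM_LARGE_UNIT_INSNS) and
   PARAM_INLINE_UNIT_GROWTH is 30, the function below allows the unit to
   grow to 10000 * (100 + 30) / 100 == 13000 insns. */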
1275 static int
1276 compute_max_insns (int insns)
1278 int max_insns = insns;
1279 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1280 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1282 return ((HOST_WIDEST_INT) max_insns
1283 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
1287 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1289 static void
1290 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
1292 while (VEC_length (cgraph_edge_p, new_edges) > 0)
1294 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
1296 gcc_assert (!edge->aux);
1297 if (edge->inline_failed
1298 && can_inline_edge_p (edge, true)
1299 && want_inline_small_function_p (edge, true))
1300 edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
1305 /* We use a greedy algorithm for inlining small functions:
1306 all inline candidates are put into a prioritized heap ordered by
1307 increasing badness.
1309 The inlining of small functions is bounded by the unit growth parameters. */
1311 static void
1312 inline_small_functions (void)
1314 struct cgraph_node *node;
1315 struct cgraph_edge *edge;
1316 fibheap_t heap = fibheap_new ();
1317 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1318 int min_size, max_size;
1319 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
1320 int initial_size = 0;
1322 if (flag_indirect_inlining)
1323 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
1325 if (dump_file)
1326 fprintf (dump_file,
1327 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1328 initial_size);
1330 /* Compute overall unit size and other global parameters used by badness
1331 metrics. */
1333 max_count = 0;
1334 initialize_growth_caches ();
1336 FOR_EACH_DEFINED_FUNCTION (node)
1337 if (!node->global.inlined_to)
1339 if (cgraph_function_with_gimple_body_p (node)
1340 || node->thunk.thunk_p)
1342 struct inline_summary *info = inline_summary (node);
1344 if (!DECL_EXTERNAL (node->decl))
1345 initial_size += info->size;
1348 for (edge = node->callers; edge; edge = edge->next_caller)
1349 if (max_count < edge->count)
1350 max_count = edge->count;
1353 overall_size = initial_size;
1354 max_size = compute_max_insns (overall_size);
1355 min_size = overall_size;
1357 /* Populate the heap with all edges we might inline. */
1359 FOR_EACH_DEFINED_FUNCTION (node)
1360 if (!node->global.inlined_to)
1362 if (dump_file)
1363 fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
1364 cgraph_node_name (node), node->uid);
1366 for (edge = node->callers; edge; edge = edge->next_caller)
1367 if (edge->inline_failed
1368 && can_inline_edge_p (edge, true)
1369 && want_inline_small_function_p (edge, true)
1370 && edge->inline_failed)
1372 gcc_assert (!edge->aux);
1373 update_edge_key (heap, edge);
1377 gcc_assert (in_lto_p
1378 || !max_count
1379 || (profile_info && flag_branch_probabilities));
1381 while (!fibheap_empty (heap))
1383 int old_size = overall_size;
1384 struct cgraph_node *where, *callee;
1385 int badness = fibheap_min_key (heap);
1386 int current_badness;
1387 int cached_badness;
1388 int growth;
1390 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1391 gcc_assert (edge->aux);
1392 edge->aux = NULL;
1393 if (!edge->inline_failed)
1394 continue;
1396 /* Be sure that the caches are maintained consistently.
1397 We cannot make this ENABLE_CHECKING only because it causes different
1398 updates of the fibheap queue. */
1399 cached_badness = edge_badness (edge, false);
1400 reset_edge_growth_cache (edge);
1401 reset_node_growth_cache (edge->callee);
1403 /* When updating the edge costs, we only decrease badness in the keys.
1404 Increases of badness are handled lazily; when we see a key with an
1405 out-of-date value on it, we re-insert it now. */
1406 current_badness = edge_badness (edge, false);
1407 gcc_assert (cached_badness == current_badness);
1408 gcc_assert (current_badness >= badness);
1409 if (current_badness != badness)
1411 edge->aux = fibheap_insert (heap, current_badness, edge);
1412 continue;
1415 if (!can_inline_edge_p (edge, true))
1416 continue;
1418 callee = cgraph_function_or_thunk_node (edge->callee, NULL);
1419 growth = estimate_edge_growth (edge);
1420 if (dump_file)
1422 fprintf (dump_file,
1423 "\nConsidering %s with %i size\n",
1424 cgraph_node_name (callee),
1425 inline_summary (callee)->size);
1426 fprintf (dump_file,
1427 " to be inlined into %s in %s:%i\n"
1428 " Estimated growth after inlined into all is %+i insns.\n"
1429 " Estimated badness is %i, frequency %.2f.\n",
1430 cgraph_node_name (edge->caller),
1431 flag_wpa ? "unknown"
1432 : gimple_filename ((const_gimple) edge->call_stmt),
1433 flag_wpa ? -1
1434 : gimple_lineno ((const_gimple) edge->call_stmt),
1435 estimate_growth (callee),
1436 badness,
1437 edge->frequency / (double)CGRAPH_FREQ_BASE);
1438 if (edge->count)
1439 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
1440 edge->count);
1441 if (dump_flags & TDF_DETAILS)
1442 edge_badness (edge, true);
1445 if (overall_size + growth > max_size
1446 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1448 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1449 report_inline_failed_reason (edge);
1450 continue;
1453 if (!want_inline_small_function_p (edge, true))
1454 continue;
1456 /* The heuristics for inlining small functions work poorly for
1457 recursive calls where we produce an effect similar to loop unrolling.
1458 When inlining such an edge seems profitable, leave the decision to the
1459 specialized recursive inliner. */
1460 if (cgraph_edge_recursive_p (edge))
1462 where = edge->caller;
1463 if (where->global.inlined_to)
1464 where = where->global.inlined_to;
1465 if (!recursive_inlining (edge,
1466 flag_indirect_inlining
1467 ? &new_indirect_edges : NULL))
1469 edge->inline_failed = CIF_RECURSIVE_INLINING;
1470 continue;
1472 reset_edge_caches (where);
1473 /* Recursive inliner inlines all recursive calls of the function
1474 at once. Consequently we need to update all callee keys. */
1475 if (flag_indirect_inlining)
1476 add_new_edges_to_heap (heap, new_indirect_edges);
1477 update_all_callee_keys (heap, where, updated_nodes);
1479 else
1481 struct cgraph_node *outer_node = NULL;
1482 int depth = 0;
1484 /* Consider the case where a self-recursive function A is inlined into B.
1485 This is a desired optimization in some cases, since it leads to an effect
1486 similar to loop peeling and we might completely optimize out the
1487 recursive call. However we must be extra selective. */
1489 where = edge->caller;
1490 while (where->global.inlined_to)
1492 if (where->decl == callee->decl)
1493 outer_node = where, depth++;
1494 where = where->callers->caller;
1496 if (outer_node
1497 && !want_inline_self_recursive_call_p (edge, outer_node,
1498 true, depth))
1500 edge->inline_failed
1501 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
1502 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1503 continue;
1505 else if (depth && dump_file)
1506 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
1508 gcc_checking_assert (!callee->global.inlined_to);
1509 inline_call (edge, true, &new_indirect_edges, &overall_size);
1510 if (flag_indirect_inlining)
1511 add_new_edges_to_heap (heap, new_indirect_edges);
1513 reset_edge_caches (edge->callee);
1514 reset_node_growth_cache (callee);
1516 /* We inlined the last offline copy into the body. This might lead
1517 to callees of the function having fewer call sites and thus they
1518 may need updating.
1520 FIXME: the callee size could also shrink because more information
1521 is propagated from the caller. We don't track when this happens and
1522 thus we need to recompute everything all the time. Once this is
1523 solved, "|| 1" should go away. */
1524 if (callee->global.inlined_to || 1)
1525 update_all_callee_keys (heap, callee, updated_nodes);
1526 else
1527 update_callee_keys (heap, edge->callee, updated_nodes);
1529 where = edge->caller;
1530 if (where->global.inlined_to)
1531 where = where->global.inlined_to;
1533 /* Our profitability metric can depend on local properties
1534 such as the number of inlinable calls and the size of the function body.
1535 After inlining these properties might change for the function we
1536 inlined into (since its body size changed) and for the functions
1537 called by the function we inlined (since the number of their inlinable
1538 callers might change). */
1539 update_caller_keys (heap, where, updated_nodes, NULL);
1541 /* We removed one call of the function we just inlined. If the offline
1542 copy is still needed, be sure to update the keys. */
1543 if (callee != where && !callee->global.inlined_to)
1544 update_caller_keys (heap, callee, updated_nodes, NULL);
1545 bitmap_clear (updated_nodes);
1547 if (dump_file)
1549 fprintf (dump_file,
1550 " Inlined into %s which now has time %i and size %i,"
1551 "net change of %+i.\n",
1552 cgraph_node_name (edge->caller),
1553 inline_summary (edge->caller)->time,
1554 inline_summary (edge->caller)->size,
1555 overall_size - old_size);
1557 if (min_size > overall_size)
1559 min_size = overall_size;
1560 max_size = compute_max_insns (min_size);
1562 if (dump_file)
1563 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1567 free_growth_caches ();
1568 if (new_indirect_edges)
1569 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1570 fibheap_delete (heap);
1571 if (dump_file)
1572 fprintf (dump_file,
1573 "Unit growth for small function inlining: %i->%i (%i%%)\n",
1574 initial_size, overall_size,
1575 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
1576 BITMAP_FREE (updated_nodes);
1579 /* Flatten NODE. Performed both during early inlining and
1580 at IPA inlining time. */
1582 static void
1583 flatten_function (struct cgraph_node *node, bool early)
1585 struct cgraph_edge *e;
1587 /* We shouldn't be called recursively when we are being processed. */
1588 gcc_assert (node->aux == NULL);
1590 node->aux = (void *) node;
1592 for (e = node->callees; e; e = e->next_callee)
1594 struct cgraph_node *orig_callee;
1595 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
1597 /* We've hit a cycle? It is time to give up. */
1598 if (callee->aux)
1600 if (dump_file)
1601 fprintf (dump_file,
1602 "Not inlining %s into %s to avoid cycle.\n",
1603 cgraph_node_name (callee),
1604 cgraph_node_name (e->caller));
1605 e->inline_failed = CIF_RECURSIVE_INLINING;
1606 continue;
1609 /* When the edge is already inlined, we just need to recurse into
1610 it in order to fully flatten the leaves. */
1611 if (!e->inline_failed)
1613 flatten_function (callee, early);
1614 continue;
1617 /* The flatten attribute needs to be processed during late inlining. For
1618 extra code quality, however, we do flattening during early optimization
1619 too. */
1620 if (!early
1621 ? !can_inline_edge_p (e, true)
1622 : !can_early_inline_edge_p (e))
1623 continue;
1625 if (cgraph_edge_recursive_p (e))
1627 if (dump_file)
1628 fprintf (dump_file, "Not inlining: recursive call.\n");
1629 continue;
1632 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1633 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
1635 if (dump_file)
1636 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1637 continue;
1640 /* Inline the edge and flatten the inline clone. Avoid
1641 recursing through the original node if the node was cloned. */
1642 if (dump_file)
1643 fprintf (dump_file, " Inlining %s into %s.\n",
1644 cgraph_node_name (callee),
1645 cgraph_node_name (e->caller));
1646 orig_callee = callee;
1647 inline_call (e, true, NULL, NULL);
1648 if (e->callee != orig_callee)
1649 orig_callee->aux = (void *) node;
1650 flatten_function (e->callee, early);
1651 if (e->callee != orig_callee)
1652 orig_callee->aux = NULL;
1655 node->aux = NULL;
1658 /* Decide on the inlining. We do so in topological order to avoid
1659 the expense of updating data structures. */
1661 static unsigned int
1662 ipa_inline (void)
1664 struct cgraph_node *node;
1665 int nnodes;
1666 struct cgraph_node **order =
1667 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1668 int i;
1670 if (in_lto_p && optimize)
1671 ipa_update_after_lto_read ();
1673 if (dump_file)
1674 dump_inline_summaries (dump_file);
1676 nnodes = ipa_reverse_postorder (order);
1678 for (node = cgraph_nodes; node; node = node->next)
1679 node->aux = 0;
1681 if (dump_file)
1682 fprintf (dump_file, "\nFlattening functions:\n");
1684 /* In the first pass handle functions to be flattened. Do this with
1685 a priority so none of our later choices will make this impossible. */
1686 for (i = nnodes - 1; i >= 0; i--)
1688 node = order[i];
1690 /* Handle nodes to be flattened.
1691 Ideally when processing callees we stop inlining at the
1692 entry of cycles, possibly cloning that entry point and
1693 trying to flatten it, turning it into a self-recursive
1694 function. */
1695 if (lookup_attribute ("flatten",
1696 DECL_ATTRIBUTES (node->decl)) != NULL)
1698 if (dump_file)
1699 fprintf (dump_file,
1700 "Flattening %s\n", cgraph_node_name (node));
1701 flatten_function (node, false);
1705 inline_small_functions ();
1706 cgraph_remove_unreachable_nodes (true, dump_file);
1707 free (order);
1709 /* We already perform some inlining of functions called once during
1710 inlining small functions above. After unreachable nodes are removed,
1711 we still might do a quick check that nothing new is found. */
1712 if (flag_inline_functions_called_once)
1714 int cold;
1715 if (dump_file)
1716 fprintf (dump_file, "\nDeciding on functions called once:\n");
1718 /* Inlining one function called once has a good chance of preventing
1719 inlining of another function into the same callee. Ideally we should
1720 work in priority order, but inlining hot functions first is probably
1721 a good cut without the extra pain of maintaining the queue.
1723 ??? This is not really fitting the bill perfectly: inlining a function
1724 into a callee often leads to better optimization of the callee due to
1725 increased context for optimization.
1726 For example if the main() function calls a function that outputs help
1727 and then a function that does the main optimization, we should inline
1728 the second with priority even if both calls are cold by themselves.
1730 We probably want to implement a new predicate replacing our use of
1731 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
1732 to be hot. */
1733 for (cold = 0; cold <= 1; cold ++)
1735 for (node = cgraph_nodes; node; node = node->next)
1737 if (want_inline_function_called_once_p (node)
1738 && (cold
1739 || cgraph_maybe_hot_edge_p (node->callers)))
1741 struct cgraph_node *caller = node->callers->caller;
1743 if (dump_file)
1745 fprintf (dump_file,
1746 "\nInlining %s size %i.\n",
1747 cgraph_node_name (node), inline_summary (node)->size);
1748 fprintf (dump_file,
1749 " Called once from %s %i insns.\n",
1750 cgraph_node_name (node->callers->caller),
1751 inline_summary (node->callers->caller)->size);
1754 inline_call (node->callers, true, NULL, NULL);
1755 if (dump_file)
1756 fprintf (dump_file,
1757 " Inlined into %s which now has %i size\n",
1758 cgraph_node_name (caller),
1759 inline_summary (caller)->size);
1765 /* Free ipa-prop structures if they are no longer needed. */
1766 if (optimize)
1767 ipa_free_all_structures_after_iinln ();
1769 if (dump_file)
1770 fprintf (dump_file,
1771 "\nInlined %i calls, eliminated %i functions\n\n",
1772 ncalls_inlined, nfunctions_inlined);
1774 if (dump_file)
1775 dump_inline_summaries (dump_file);
1776 /* In WPA we use the inline summaries for the partitioning process. */
1777 if (!flag_wpa)
1778 inline_free_summary ();
1779 return 0;
1782 /* Inline always-inline function calls in NODE. */
1784 static bool
1785 inline_always_inline_functions (struct cgraph_node *node)
1787 struct cgraph_edge *e;
1788 bool inlined = false;
1790 for (e = node->callees; e; e = e->next_callee)
1792 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
1793 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1794 continue;
1796 if (cgraph_edge_recursive_p (e))
1798 if (dump_file)
1799 fprintf (dump_file, " Not inlining recursive call to %s.\n",
1800 cgraph_node_name (e->callee));
1801 e->inline_failed = CIF_RECURSIVE_INLINING;
1802 continue;
1805 if (!can_early_inline_edge_p (e))
1806 continue;
1808 if (dump_file)
1809 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
1810 cgraph_node_name (e->callee),
1811 cgraph_node_name (e->caller));
1812 inline_call (e, true, NULL, NULL);
1813 inlined = true;
1816 return inlined;
1819 /* Decide on the inlining. We do so in topological order to avoid
1820 the expense of updating data structures. */
1822 static bool
1823 early_inline_small_functions (struct cgraph_node *node)
1825 struct cgraph_edge *e;
1826 bool inlined = false;
1828 for (e = node->callees; e; e = e->next_callee)
1830 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
1831 if (!inline_summary (callee)->inlinable
1832 || !e->inline_failed)
1833 continue;
1835 /* Do not consider functions not declared inline. */
1836 if (!DECL_DECLARED_INLINE_P (callee->decl)
1837 && !flag_inline_small_functions
1838 && !flag_inline_functions)
1839 continue;
1841 if (dump_file)
1842 fprintf (dump_file, "Considering inline candidate %s.\n",
1843 cgraph_node_name (callee));
1845 if (!can_early_inline_edge_p (e))
1846 continue;
1848 if (cgraph_edge_recursive_p (e))
1850 if (dump_file)
1851 fprintf (dump_file, " Not inlining: recursive call.\n");
1852 continue;
1855 if (!want_early_inline_function_p (e))
1856 continue;
1858 if (dump_file)
1859 fprintf (dump_file, " Inlining %s into %s.\n",
1860 cgraph_node_name (callee),
1861 cgraph_node_name (e->caller));
1862 inline_call (e, true, NULL, NULL);
1863 inlined = true;
1866 return inlined;
1869 /* Do inlining of small functions. Doing so early helps profiling and other
1870 passes to be somewhat more effective and avoids some code duplication in
1871 later real inlining pass for testcases with very many function calls. */
1872 static unsigned int
1873 early_inliner (void)
1875 struct cgraph_node *node = cgraph_get_node (current_function_decl);
1876 struct cgraph_edge *edge;
1877 unsigned int todo = 0;
1878 int iterations = 0;
1879 bool inlined = false;
1881 if (seen_error ())
1882 return 0;
1884 /* Do nothing if the data structures for the IPA inliner are already computed.
1885 This happens when some pass decides to construct a new function and
1886 cgraph_add_new_function calls lowering passes and early optimization on
1887 it. This may confuse us when the early inliner decides to inline a call to
1888 a function clone, because function clones don't have a parameter list in
1889 ipa-prop matching their signature. */
1890 if (ipa_node_params_vector)
1891 return 0;
1893 #ifdef ENABLE_CHECKING
1894 verify_cgraph_node (node);
1895 #endif
1897 /* Even when not optimizing or not inlining, inline always-inline
1898 functions. */
1899 inlined = inline_always_inline_functions (node);
1901 if (!optimize
1902 || flag_no_inline
1903 || !flag_early_inlining
1904 /* Never inline regular functions into always-inline functions
1905 during incremental inlining. This is unfortunate, as functions calling
1906 always-inline functions will get less optimized, but at the
1907 same time inlining a function that calls an always-inline
1908 function into an always-inline function might introduce
1909 cycles of edges to be always inlined in the callgraph.
1911 We might want to be smarter and just avoid this type of inlining. */
1912 || DECL_DISREGARD_INLINE_LIMITS (node->decl))
1914 else if (lookup_attribute ("flatten",
1915 DECL_ATTRIBUTES (node->decl)) != NULL)
1917 /* When the function is marked to be flattened, recursively inline
1918 all calls in it. */
1919 if (dump_file)
1920 fprintf (dump_file,
1921 "Flattening %s\n", cgraph_node_name (node));
1922 flatten_function (node, true);
1923 inlined = true;
1925 else
1927 /* We iterate incremental inlining to get trivial cases of indirect
1928 inlining. */
1929 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1930 && early_inline_small_functions (node))
1932 timevar_push (TV_INTEGRATION);
1933 todo |= optimize_inline_calls (current_function_decl);
1935 /* Technically we ought to recompute the inline parameters so the new
1936 iteration of the early inliner works as expected. However, we have the
1937 values approximately right and thus only need to update the edge
1938 info that might be cleared out for newly discovered edges. */
1939 for (edge = node->callees; edge; edge = edge->next_callee)
1941 struct inline_edge_summary *es = inline_edge_summary (edge);
1942 es->call_stmt_size
1943 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
1944 es->call_stmt_time
1945 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
1947 timevar_pop (TV_INTEGRATION);
1948 iterations++;
1949 inlined = false;
1951 if (dump_file)
1952 fprintf (dump_file, "Iterations: %i\n", iterations);
1955 if (inlined)
1957 timevar_push (TV_INTEGRATION);
1958 todo |= optimize_inline_calls (current_function_decl);
1959 timevar_pop (TV_INTEGRATION);
1962 cfun->always_inline_functions_inlined = true;
1964 return todo;
1967 struct gimple_opt_pass pass_early_inline =
1970 GIMPLE_PASS,
1971 "einline", /* name */
1972 NULL, /* gate */
1973 early_inliner, /* execute */
1974 NULL, /* sub */
1975 NULL, /* next */
1976 0, /* static_pass_number */
1977 TV_INLINE_HEURISTICS, /* tv_id */
1978 PROP_ssa, /* properties_required */
1979 0, /* properties_provided */
1980 0, /* properties_destroyed */
1981 0, /* todo_flags_start */
1982 0 /* todo_flags_finish */
1987 /* When to run IPA inlining. Inlining of always-inline functions
1988 happens during early inlining.
1990 Enable inlining unconditionally at -flto. We need size estimates to
1991 drive partitioning. */
1993 static bool
1994 gate_ipa_inline (void)
1996 return optimize || flag_lto || flag_wpa;
1999 struct ipa_opt_pass_d pass_ipa_inline =
2002 IPA_PASS,
2003 "inline", /* name */
2004 gate_ipa_inline, /* gate */
2005 ipa_inline, /* execute */
2006 NULL, /* sub */
2007 NULL, /* next */
2008 0, /* static_pass_number */
2009 TV_INLINE_HEURISTICS, /* tv_id */
2010 0, /* properties_required */
2011 0, /* properties_provided */
2012 0, /* properties_destroyed */
2013 TODO_remove_functions, /* todo_flags_start */
2014 TODO_dump_cgraph
2015 | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
2017 inline_generate_summary, /* generate_summary */
2018 inline_write_summary, /* write_summary */
2019 inline_read_summary, /* read_summary */
2020 NULL, /* write_optimization_summary */
2021 NULL, /* read_optimization_summary */
2022 NULL, /* stmt_fixup */
2023 0, /* TODOs */
2024 inline_transform, /* function_transform */
2025 NULL, /* variable_transform */