gcc/ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Inlining decision heuristics
24 The implementation of the inliner is organized as follows:
26 inlining heuristics limits
28 can_inline_edge_p allows checking that a particular inlining is permitted
29 by the limits specified by the user (allowed function growth and so
30 on).
32 Functions are inlined when it is obvious the result is profitable (such
33 as functions called once or when inlining reduces code size).
34 In addition to that we perform inlining of small functions and recursive
35 inlining.
37 inlining heuristics
39 The inliner itself is split into two passes:
41 pass_early_inlining
43 Simple local inlining pass inlining callees into current function.
44 This pass makes no use of whole unit analysis and thus it can do only
45 very simple decisions based on local properties.
47 The strength of the pass is that it is run in topological order
48 (reverse postorder) on the callgraph. Functions are converted into SSA
49 form just before this pass and optimized subsequently. As a result, the
50 callees of the function seen by the early inliner were already optimized,
51 and the results of early inlining add a lot of optimization opportunities
52 for the local optimizations.
54 The pass handles the obvious inlining decisions within the compilation
55 unit - inlining auto-inline functions, inlining for size and
56 flattening.
58 The main strength of the pass is the ability to eliminate the abstraction
59 penalty in C++ code (via a combination of inlining and early
60 optimization) and thus improve the quality of the analysis done by the real IPA
61 optimizers.
63 Because of the lack of whole-unit knowledge, the pass cannot really make
64 good code size/performance tradeoffs. It however does very simple
65 speculative inlining, allowing code size to grow by
66 EARLY_INLINING_INSNS when the callee is a leaf function. In this case the
67 optimizations performed later are very likely to eliminate the cost.
69 pass_ipa_inline
71 This is the real inliner able to handle inlining with whole program
72 knowledge. It performs the following steps:
74 1) inlining of small functions. This is implemented by a greedy
75 algorithm that orders all inlinable cgraph edges by their badness and
76 inlines them in this order as long as the inline limits allow doing so.
78 This heuristic is not very good at inlining recursive calls. Recursive
79 calls can be inlined with results similar to loop unrolling. To do so,
80 a special-purpose recursive inliner is executed on the function when a
81 recursive edge is met as a viable candidate.
83 2) Unreachable functions are removed from the callgraph. Inlining leads
84 to devirtualization and other modifications of the callgraph, so functions
85 may become unreachable during the process. Also functions declared as
86 extern inline or virtual functions are removed, since after inlining
87 we no longer need the offline bodies.
89 3) Functions called once and not exported from the unit are inlined.
90 This should almost always lead to a reduction in code size by eliminating
91 the need for an offline copy of the function. */
93 #include "config.h"
94 #include "system.h"
95 #include "coretypes.h"
96 #include "tm.h"
97 #include "tree.h"
98 #include "tree-inline.h"
99 #include "langhooks.h"
100 #include "flags.h"
101 #include "cgraph.h"
102 #include "diagnostic.h"
103 #include "gimple-pretty-print.h"
104 #include "timevar.h"
105 #include "params.h"
106 #include "fibheap.h"
107 #include "intl.h"
108 #include "tree-pass.h"
109 #include "coverage.h"
110 #include "ggc.h"
111 #include "rtl.h"
112 #include "tree-flow.h"
113 #include "ipa-prop.h"
114 #include "except.h"
115 #include "target.h"
116 #include "ipa-inline.h"
117 #include "ipa-utils.h"
119 /* Statistics we collect about inlining algorithm. */
120 static int overall_size;
121 static gcov_type max_count;
123 /* Return false when inlining edge E would lead to violating
124 limits on function unit growth or stack usage growth.
126 The relative function body growth limit is present generally
127 to avoid problems with non-linear behavior of the compiler.
128 To allow inlining huge functions into a tiny wrapper, the limit
129 is always based on the bigger of the two functions considered.
131 For stack growth limits we always base the limit on the stack
132 usage of the caller. We want to prevent applications from segfaulting
133 on stack overflow when functions with huge stack frames get
134 inlined. */
136 static bool
137 caller_growth_limits (struct cgraph_edge *e)
139 struct cgraph_node *to = e->caller;
140 struct cgraph_node *what = cgraph_function_or_thunk_node (e->callee, NULL);
141 int newsize;
142 int limit = 0;
143 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
144 struct inline_summary *info, *what_info, *outer_info = inline_summary (to);
146 /* Look for the function e->caller is inlined into. While doing
147 so work out the largest function body on the way. As
148 described above, we want to base our function growth
149 limits on that, not on the self size of the
150 outer function, nor on the self size of the inline code
151 we immediately inline into. This is the most relaxed
152 interpretation of the rule "do not grow large functions
153 too much in order to prevent the compiler from exploding". */
154 while (true)
156 info = inline_summary (to);
157 if (limit < info->self_size)
158 limit = info->self_size;
159 if (stack_size_limit < info->estimated_self_stack_size)
160 stack_size_limit = info->estimated_self_stack_size;
161 if (to->global.inlined_to)
162 to = to->callers->caller;
163 else
164 break;
167 what_info = inline_summary (what);
169 if (limit < what_info->self_size)
170 limit = what_info->self_size;
172 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
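  /* Illustrative example, assuming the default --param large-function-growth=100:
     if the largest self_size on the path is 1000 instructions, the limit
     becomes 1000 + 1000 * 100 / 100 = 2000, i.e. the combined body may at
     most double.  */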
174 /* Check the size after inlining against the function limits. But allow
175 the function to shrink if it went over the limits by forced inlining. */
176 newsize = estimate_size_after_inlining (to, e);
177 if (newsize >= info->size
178 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
179 && newsize > limit)
181 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
182 return false;
185 if (!what_info->estimated_stack_size)
186 return true;
188 /* FIXME: Stack size limit often prevents inlining in Fortran programs
189 due to large i/o datastructures used by the Fortran front-end.
190 We ought to ignore this limit when we know that the edge is executed
191 on every invocation of the caller (i.e. its call statement dominates the
192 exit block). We do not track this information, yet. */
193 stack_size_limit += ((gcov_type)stack_size_limit
194 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
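  /* Illustrative example, assuming the default --param stack-frame-growth=1000:
     a caller chain whose largest self stack frame is 1024 bytes gives
     stack_size_limit = 1024 + 1024 * 1000 / 100 = 11264 bytes for the
     inlined stack.  */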
196 inlined_stack = (outer_info->stack_frame_offset
197 + outer_info->estimated_self_stack_size
198 + what_info->estimated_stack_size);
199 /* Check new stack consumption with stack consumption at the place
200 stack is used. */
201 if (inlined_stack > stack_size_limit
202 /* If the function already has large stack usage from a sibling
203 inline call, we can inline, too.
204 This overoptimistically assumes that we are good at stack
205 packing. */
206 && inlined_stack > info->estimated_stack_size
207 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
209 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
210 return false;
212 return true;
215 /* Dump info about why inlining has failed. */
217 static void
218 report_inline_failed_reason (struct cgraph_edge *e)
220 if (dump_file)
222 fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
223 cgraph_node_name (e->caller), e->caller->uid,
224 cgraph_node_name (e->callee), e->callee->uid,
225 cgraph_inline_failed_string (e->inline_failed));
229 /* Decide if we can inline the edge and possibly update
230 inline_failed reason.
231 We check whether inlining is possible at all and whether
232 caller growth limits allow doing so.
234 If REPORT is true, output the reason to the dump file. */
236 static bool
237 can_inline_edge_p (struct cgraph_edge *e, bool report)
239 bool inlinable = true;
240 enum availability avail;
241 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, &avail);
242 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
243 tree callee_tree = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
245 gcc_assert (e->inline_failed);
247 if (!callee || !callee->analyzed)
249 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
250 inlinable = false;
252 else if (!inline_summary (callee)->inlinable)
254 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
255 inlinable = false;
257 else if (avail <= AVAIL_OVERWRITABLE)
259 e->inline_failed = CIF_OVERWRITABLE;
260 return false;
262 else if (e->call_stmt_cannot_inline_p)
264 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
265 inlinable = false;
267 /* Don't inline if the functions have different EH personalities. */
268 else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
269 && DECL_FUNCTION_PERSONALITY (callee->decl)
270 && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
271 != DECL_FUNCTION_PERSONALITY (callee->decl)))
273 e->inline_failed = CIF_EH_PERSONALITY;
274 inlinable = false;
276 /* Don't inline if the callee can throw non-call exceptions but the
277 caller cannot.
278 FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
279 Move the flag into cgraph node or mirror it in the inline summary. */
280 else if (DECL_STRUCT_FUNCTION (callee->decl)
281 && DECL_STRUCT_FUNCTION
282 (callee->decl)->can_throw_non_call_exceptions
283 && !(DECL_STRUCT_FUNCTION (e->caller->decl)
284 && DECL_STRUCT_FUNCTION
285 (e->caller->decl)->can_throw_non_call_exceptions))
287 e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
288 inlinable = false;
290 /* Check compatibility of target optimization options. */
291 else if (!targetm.target_option.can_inline_p (e->caller->decl,
292 callee->decl))
294 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
295 inlinable = false;
297 /* Check if caller growth allows the inlining. */
298 else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
299 && !lookup_attribute ("flatten",
300 DECL_ATTRIBUTES
301 (e->caller->global.inlined_to
302 ? e->caller->global.inlined_to->decl
303 : e->caller->decl))
304 && !caller_growth_limits (e))
305 inlinable = false;
306 /* Don't inline a function with a higher optimization level than the
307 caller. FIXME: this is really just the tip of the iceberg of handling
308 the optimization attribute. */
309 else if (caller_tree != callee_tree)
311 struct cl_optimization *caller_opt
312 = TREE_OPTIMIZATION ((caller_tree)
313 ? caller_tree
314 : optimization_default_node);
316 struct cl_optimization *callee_opt
317 = TREE_OPTIMIZATION ((callee_tree)
318 ? callee_tree
319 : optimization_default_node);
321 if ((caller_opt->x_optimize > callee_opt->x_optimize)
322 || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
324 e->inline_failed = CIF_TARGET_OPTIMIZATION_MISMATCH;
325 inlinable = false;
329 /* Be sure that the cannot_inline_p flag is up to date. */
330 gcc_checking_assert (!e->call_stmt
331 || (gimple_call_cannot_inline_p (e->call_stmt)
332 == e->call_stmt_cannot_inline_p)
333 /* In -flto-partition=none mode we really keep things out of
334 sync because call_stmt_cannot_inline_p is set at cgraph
335 merging when function bodies are not there yet. */
336 || (in_lto_p && !gimple_call_cannot_inline_p (e->call_stmt)));
337 if (!inlinable && report)
338 report_inline_failed_reason (e);
339 return inlinable;
343 /* Return true if the edge E is inlinable during early inlining. */
345 static bool
346 can_early_inline_edge_p (struct cgraph_edge *e)
348 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
349 NULL);
350 /* The early inliner might get called at WPA stage when an IPA pass adds a new
351 function. In this case we cannot really do any early inlining
352 because function bodies are missing. */
353 if (!gimple_has_body_p (callee->decl))
355 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
356 return false;
358 /* In the early inliner some callees may not be in SSA form yet
359 (i.e. the callgraph is cyclic and the early inliner has not processed
360 the callee yet). We don't have a CIF code for this
361 case; later we will re-do the decision in the real inliner. */
362 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
363 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
365 if (dump_file)
366 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
367 return false;
369 if (!can_inline_edge_p (e, true))
370 return false;
371 return true;
375 /* Return true when N is a leaf function. Accept cheap builtins
376 in leaf functions. */
378 static bool
379 leaf_node_p (struct cgraph_node *n)
381 struct cgraph_edge *e;
382 for (e = n->callees; e; e = e->next_callee)
383 if (!is_inexpensive_builtin (e->callee->decl))
384 return false;
385 return true;
389 /* Return true if we are interested in early inlining the small function called by E. */
391 static bool
392 want_early_inline_function_p (struct cgraph_edge *e)
394 bool want_inline = true;
395 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
397 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
399 else if (!DECL_DECLARED_INLINE_P (callee->decl)
400 && !flag_inline_small_functions)
402 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
403 report_inline_failed_reason (e);
404 want_inline = false;
406 else
408 int growth = estimate_edge_growth (e);
409 if (growth <= 0)
411 else if (!cgraph_maybe_hot_edge_p (e)
412 && growth > 0)
414 if (dump_file)
415 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
416 "call is cold and code would grow by %i\n",
417 cgraph_node_name (e->caller), e->caller->uid,
418 cgraph_node_name (callee), callee->uid,
419 growth);
420 want_inline = false;
422 else if (!leaf_node_p (callee)
423 && growth > 0)
425 if (dump_file)
426 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
427 "callee is not leaf and code would grow by %i\n",
428 cgraph_node_name (e->caller), e->caller->uid,
429 cgraph_node_name (callee), callee->uid,
430 growth);
431 want_inline = false;
433 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
435 if (dump_file)
436 fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
437 "growth %i exceeds --param early-inlining-insns\n",
438 cgraph_node_name (e->caller), e->caller->uid,
439 cgraph_node_name (callee), callee->uid,
440 growth);
441 want_inline = false;
444 return want_inline;
447 /* Return true if we are interested in inlining the small function called by E.
448 When REPORT is true, report the reason to the dump file. */
450 static bool
451 want_inline_small_function_p (struct cgraph_edge *e, bool report)
453 bool want_inline = true;
454 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
456 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
458 else if (!DECL_DECLARED_INLINE_P (callee->decl)
459 && !flag_inline_small_functions)
461 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
462 want_inline = false;
464 else
466 int growth = estimate_edge_growth (e);
468 if (growth <= 0)
470 else if (DECL_DECLARED_INLINE_P (callee->decl)
471 && growth >= MAX_INLINE_INSNS_SINGLE)
473 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
474 want_inline = false;
476 else if (!DECL_DECLARED_INLINE_P (callee->decl)
477 && !flag_inline_functions)
479 e->inline_failed = CIF_NOT_DECLARED_INLINED;
480 want_inline = false;
482 else if (!DECL_DECLARED_INLINE_P (callee->decl)
483 && growth >= MAX_INLINE_INSNS_AUTO)
485 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
486 want_inline = false;
488 /* If the call is cold, do not inline when the function body would grow.
489 Still inline when the overall unit size will shrink because the offline
490 copy of the function is eliminated.
492 This is slightly wrong on the aggressive side: it is entirely possible
493 that the function is called many times in a context where inlining
494 reduces code size and a few times in a context where inlining increases
495 code size. The resulting growth estimate will be negative even if it
496 would make more sense to keep the offline copy and not inline into the
497 call sites that make the code size grow.
499 When badness orders the calls in a way that code-reducing calls come
500 first, this situation is not a problem at all: after inlining all
501 "good" calls, we will realize that keeping the function around is
502 better. */
503 else if (!cgraph_maybe_hot_edge_p (e)
504 && (DECL_EXTERNAL (callee->decl)
506 /* Unlike for functions called once, we play it unsafe with
507 COMDATs. We can allow that since we know the functions
508 under consideration are small (and thus the risk is small) and
509 moreover the growth estimates already account for the fact that COMDAT
510 functions may or may not disappear when eliminated from the
511 current unit. With good probability, making the aggressive
512 choice in all units is going to make the overall program
513 smaller.
515 Consequently we ask cgraph_can_remove_if_no_direct_calls_p
516 instead of
517 cgraph_will_be_removed_from_program_if_no_direct_calls */
519 || !cgraph_can_remove_if_no_direct_calls_p (callee)
520 || estimate_growth (callee) > 0))
522 e->inline_failed = CIF_UNLIKELY_CALL;
523 want_inline = false;
526 if (!want_inline && report)
527 report_inline_failed_reason (e);
528 return want_inline;
531 /* EDGE is a self-recursive edge.
532 We handle two cases - when function A is being inlined into itself
533 or when function A is being inlined into another inline copy of function
534 A within function B.
536 In the first case OUTER_NODE points to the toplevel copy of A, while
537 in the second case OUTER_NODE points to the outermost copy of A in B.
539 In both cases we want to be extra selective since
540 inlining the call will just cause new recursive calls to appear. */
542 static bool
543 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
544 struct cgraph_node *outer_node,
545 bool peeling,
546 int depth)
548 char const *reason = NULL;
549 bool want_inline = true;
550 int caller_freq = CGRAPH_FREQ_BASE;
551 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
553 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
554 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
556 if (!cgraph_maybe_hot_edge_p (edge))
558 reason = "recursive call is cold";
559 want_inline = false;
561 else if (max_count && !outer_node->count)
563 reason = "not executed in profile";
564 want_inline = false;
566 else if (depth > max_depth)
568 reason = "--param max-inline-recursive-depth exceeded.";
569 want_inline = false;
572 if (outer_node->global.inlined_to)
573 caller_freq = outer_node->callers->frequency;
575 if (!want_inline)
577 /* Inlining of a self-recursive function into a copy of itself within another
578 function is a transformation similar to loop peeling.
580 Peeling is profitable if we can inline enough copies to make the probability
581 of an actual call to the self-recursive function very small. Be sure that
582 the probability of recursion is small.
584 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
585 This way the expected number of recursions is at most max_depth. */
586 else if (peeling)
588 int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
589 / max_depth);
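      /* Illustrative example, assuming CGRAPH_FREQ_BASE is 1000 and the
	 default max_depth of 8: max_prob = 1000 - (1000 + 7) / 8 = 875,
	 i.e. the scaled recursion probability must stay below 1 - 1/max_depth
	 so that the expected recursion depth stays below max_depth.  */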
590 int i;
591 for (i = 1; i < depth; i++)
592 max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
593 if (max_count
594 && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
595 >= max_prob))
597 reason = "profile of recursive call is too large";
598 want_inline = false;
600 if (!max_count
601 && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
602 >= max_prob))
604 reason = "frequency of recursive call is too large";
605 want_inline = false;
608 /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if the
609 recursion depth is large. We reduce function call overhead and increase the
610 chance that things fit in the hardware return predictor.
612 Recursive inlining might however increase the cost of stack frame setup,
613 actually slowing down functions whose recursion tree is wide rather than
614 deep.
616 Deciding reliably on when to do recursive inlining without profile feedback
617 is tricky. For now we disable recursive inlining when the probability of self
618 recursion is low.
620 Recursive inlining of a self-recursive call within a loop also results in large
621 loop depths that generally optimize badly. We may want to throttle down inlining
622 in those cases. In particular this seems to happen in one of the libstdc++ rb tree
623 methods. */
624 else
626 if (max_count
627 && (edge->count * 100 / outer_node->count
628 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
630 reason = "profile of recursive call is too small";
631 want_inline = false;
633 else if (!max_count
634 && (edge->frequency * 100 / caller_freq
635 <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
637 reason = "frequency of recursive call is too small";
638 want_inline = false;
641 if (!want_inline && dump_file)
642 fprintf (dump_file, " not inlining recursively: %s\n", reason);
643 return want_inline;
646 /* Return true when NODE has a caller other than EDGE.
647 Worker for cgraph_for_node_and_aliases. */
649 static bool
650 check_caller_edge (struct cgraph_node *node, void *edge)
652 return (node->callers
653 && node->callers != edge);
657 /* Decide if NODE is called once and inlining it would eliminate the need
658 for the offline copy of the function. */
660 static bool
661 want_inline_function_called_once_p (struct cgraph_node *node)
663 struct cgraph_node *function = cgraph_function_or_thunk_node (node, NULL);
664 /* Already inlined? */
665 if (function->global.inlined_to)
666 return false;
667 /* Zero or more than one caller? */
668 if (!node->callers
669 || node->callers->next_caller)
670 return false;
671 /* Maybe other aliases have more direct calls. */
672 if (cgraph_for_node_and_aliases (node, check_caller_edge, node->callers, true))
673 return false;
674 /* Recursive call makes no sense to inline. */
675 if (cgraph_edge_recursive_p (node->callers))
676 return false;
677 /* External functions are not really in the unit, so inlining
678 them when called once would just increase the program size. */
679 if (DECL_EXTERNAL (function->decl))
680 return false;
681 /* Offline body must be optimized out. */
682 if (!cgraph_will_be_removed_from_program_if_no_direct_calls (function))
683 return false;
684 if (!can_inline_edge_p (node->callers, true))
685 return false;
686 return true;
690 /* Return relative time improvement for inlining EDGE in range
691 1...2^9. */
693 static inline int
694 relative_time_benefit (struct inline_summary *callee_info,
695 struct cgraph_edge *edge,
696 int time_growth)
698 int relbenefit;
699 gcov_type uninlined_call_time;
701 uninlined_call_time =
702 ((gcov_type)
703 (callee_info->time
704 + inline_edge_summary (edge)->call_stmt_time
705 + CGRAPH_FREQ_BASE / 2) * edge->frequency
706 / CGRAPH_FREQ_BASE);
707 /* Compute relative time benefit, i.e. how much the call becomes faster.
708 ??? perhaps computing how much the caller+callee together become faster
709 would lead to more realistic results. */
710 if (!uninlined_call_time)
711 uninlined_call_time = 1;
712 relbenefit =
713 (uninlined_call_time - time_growth) * 256 / (uninlined_call_time);
714 relbenefit = MIN (relbenefit, 512);
715 relbenefit = MAX (relbenefit, 1);
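  /* Worked example of the formula above: if the uninlined call costs 100 time
     units and inlining grows the caller's time by 60 (i.e. saves 40),
     relbenefit = (100 - 60) * 256 / 100 = 102 on the 1...2^9 fixed-point
     scale, roughly a 40% relative speedup.  */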
716 return relbenefit;
720 /* A cost model driving the inlining heuristics in a way such that the edges
721 with the smallest badness are inlined first. After each inlining is performed
722 the costs of all caller edges of the nodes affected are recomputed so the
723 metrics may accurately depend on values such as the number of inlinable callers
724 of the function or the function body size. */
726 static int
727 edge_badness (struct cgraph_edge *edge, bool dump)
729 gcov_type badness;
730 int growth, time_growth;
731 struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee,
732 NULL);
733 struct inline_summary *callee_info = inline_summary (callee);
735 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
736 return INT_MIN;
738 growth = estimate_edge_growth (edge);
739 time_growth = estimate_edge_time (edge);
741 if (dump)
743 fprintf (dump_file, " Badness calculation for %s -> %s\n",
744 cgraph_node_name (edge->caller),
745 cgraph_node_name (callee));
746 fprintf (dump_file, " size growth %i, time growth %i\n",
747 growth,
748 time_growth);
751 /* Always prefer inlining saving code size. */
752 if (growth <= 0)
754 badness = INT_MIN / 2 + growth;
755 if (dump)
756 fprintf (dump_file, " %i: Growth %i <= 0\n", (int) badness,
757 growth);
760 /* When profiling is available, compute badness as:
762 relative_edge_count * relative_time_benefit
763 goodness = -------------------------------------------
764 edge_growth
765 badness = -goodness
767 The fraction is upside down, because for edge counts and time benefits
768 the bounds are known. Edge growth is essentially unlimited. */
770 else if (max_count)
772 int relbenefit = relative_time_benefit (callee_info, edge, time_growth);
773 badness =
774 ((int)
775 ((double) edge->count * INT_MIN / 2 / max_count / 512) *
776 relative_time_benefit (callee_info, edge, time_growth)) / growth;
778 /* Be sure that insanity of the profile won't lead to increasing counts
779 in the scaling and thus to overflow in the computation above. */
780 gcc_assert (max_count >= edge->count);
781 if (dump)
783 fprintf (dump_file,
784 " %i (relative %f): profile info. Relative count %f"
785 " * Relative benefit %f\n",
786 (int) badness, (double) badness / INT_MIN,
787 (double) edge->count / max_count,
788 relbenefit * 100 / 256.0);
792 /* When the function-local profile is available, compute badness as:
795 growth_of_callee
796 badness = -------------------------------------- + growth_for_all
797 relative_time_benefit * edge_frequency
800 else if (flag_guess_branch_prob)
802 int div = edge->frequency * (1<<10) / CGRAPH_FREQ_MAX;
803 int growth_for_all;
805 div = MAX (div, 1);
806 gcc_checking_assert (edge->frequency <= CGRAPH_FREQ_MAX);
807 div *= relative_time_benefit (callee_info, edge, time_growth);
809 /* frequency is normalized into the range 1...2^10,
810 relbenefit into the range 1...2^9,
811 so DIV should be in the range 1...2^19. */
812 gcc_checking_assert (div >= 1 && div <= (1<<19));
814 /* The result must be an integer in the range 0...INT_MAX.
815 Set the base of the fixed point calculation so we don't lose much
816 precision for small badnesses (those are interesting) yet we don't
817 overflow for growths that are still in the interesting range. */
818 badness = ((gcov_type)growth) * (1<<18);
819 badness = (badness + div / 2) / div;
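      /* Worked example: a maximally hot call (edge->frequency == CGRAPH_FREQ_MAX,
	 so the scaled frequency is 1<<10) with relbenefit 256 gives
	 div = 1<<18, and a size growth of 20 yields a badness of about
	 20 * (1<<18) / (1<<18) = 20 before growth_for_all is added below.  */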
821 /* The overall growth of inlining all calls of the function matters: we want
822 to inline so that the offline copy of the function is no longer needed.
824 Additionally, functions that can be fully inlined without much
825 effort are better inline candidates than functions that can be fully
826 inlined only after noticeable overall unit growth. The latter
827 are better in the sense of compressing code size by factoring out common
828 code into a separate function shared by multiple code paths.
830 We might mix the value into the fraction by taking into account the
831 relative growth of the unit, but for now just add the number
832 into the resulting fraction. */
833 growth_for_all = estimate_growth (callee);
834 badness += growth_for_all;
835 if (badness > INT_MAX - 1)
836 badness = INT_MAX - 1;
837 if (dump)
839 fprintf (dump_file,
840 " %i: guessed profile. frequency %f, overall growth %i,"
841 " benefit %f%%, divisor %i\n",
842 (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE, growth_for_all,
843 relative_time_benefit (callee_info, edge, time_growth) * 100 / 256.0, div);
846 /* When the function-local profile is not available or it does not give
847 useful information (i.e. frequency is zero), base the cost on the
848 loop nest and overall size growth, so we optimize for the overall number
849 of functions fully inlined in the program. */
850 else
852 int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
853 badness = estimate_growth (callee) * 256;
855 /* Decrease badness if call is nested. */
856 if (badness > 0)
857 badness >>= nest;
858 else
860 badness <<= nest;
862 if (dump)
863 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
864 nest);
867 /* Ensure that we did not overflow in all the fixed point math above. */
868 gcc_assert (badness >= INT_MIN);
869 gcc_assert (badness <= INT_MAX - 1);
870 /* Make recursive inlining happen always after other inlining is done. */
871 if (cgraph_edge_recursive_p (edge))
872 return badness + 1;
873 else
874 return badness;
877 /* Recompute badness of EDGE and update its key in HEAP if needed. */
878 static inline void
879 update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
881 int badness = edge_badness (edge, false);
882 if (edge->aux)
884 fibnode_t n = (fibnode_t) edge->aux;
885 gcc_checking_assert (n->data == edge);
887 /* fibheap_replace_key only decreases keys.
888 When we increase the key we do not update the heap
889 and instead re-insert the element once it becomes
890 a minimum of the heap. */
891 if (badness < n->key)
893 if (dump_file && (dump_flags & TDF_DETAILS))
895 fprintf (dump_file,
896 " decreasing badness %s/%i -> %s/%i, %i to %i\n",
897 cgraph_node_name (edge->caller), edge->caller->uid,
898 cgraph_node_name (edge->callee), edge->callee->uid,
899 (int)n->key,
900 badness);
902 fibheap_replace_key (heap, n, badness);
903 gcc_checking_assert (n->key == badness);
906 else
908 if (dump_file && (dump_flags & TDF_DETAILS))
910 fprintf (dump_file,
911 " enqueuing call %s/%i -> %s/%i, badness %i\n",
912 cgraph_node_name (edge->caller), edge->caller->uid,
913 cgraph_node_name (edge->callee), edge->callee->uid,
914 badness);
916 edge->aux = fibheap_insert (heap, badness, edge);
921 /* NODE was inlined.
922 All caller edges need to be reset because
923 size estimates changed. Similarly callees need to be reset
924 because a better context may be known. */
926 static void
927 reset_edge_caches (struct cgraph_node *node)
929 struct cgraph_edge *edge;
930 struct cgraph_edge *e = node->callees;
931 struct cgraph_node *where = node;
932 int i;
933 struct ipa_ref *ref;
935 if (where->global.inlined_to)
936 where = where->global.inlined_to;
938 /* WHERE's body size has changed, so the cached growth is invalid. */
939 reset_node_growth_cache (where);
941 for (edge = where->callers; edge; edge = edge->next_caller)
942 if (edge->inline_failed)
943 reset_edge_growth_cache (edge);
944 for (i = 0; ipa_ref_list_refering_iterate (&where->ref_list, i, ref); i++)
945 if (ref->use == IPA_REF_ALIAS)
946 reset_edge_caches (ipa_ref_refering_node (ref));
948 if (!e)
949 return;
951 while (true)
952 if (!e->inline_failed && e->callee->callees)
953 e = e->callee->callees;
954 else
956 if (e->inline_failed)
957 reset_edge_growth_cache (e);
958 if (e->next_callee)
959 e = e->next_callee;
960 else
964 if (e->caller == node)
965 return;
966 e = e->caller->callers;
968 while (!e->next_callee);
969 e = e->next_callee;
974 /* Recompute HEAP nodes for each caller of NODE.
975 UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
976 When CHECK_INLINABLITY_FOR is set, re-check only that specified edge for
977 inlinability. Otherwise check all edges. */
979 static void
980 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
981 bitmap updated_nodes,
982 struct cgraph_edge *check_inlinablity_for)
984 struct cgraph_edge *edge;
985 int i;
986 struct ipa_ref *ref;
988 if ((!node->alias && !inline_summary (node)->inlinable)
989 || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
990 || node->global.inlined_to)
991 return;
992 if (!bitmap_set_bit (updated_nodes, node->uid))
993 return;
995 for (i = 0; ipa_ref_list_refering_iterate (&node->ref_list, i, ref); i++)
996 if (ref->use == IPA_REF_ALIAS)
998 struct cgraph_node *alias = ipa_ref_refering_node (ref);
999 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1002 for (edge = node->callers; edge; edge = edge->next_caller)
1003 if (edge->inline_failed)
1005 if (!check_inlinablity_for
1006 || check_inlinablity_for == edge)
1008 if (can_inline_edge_p (edge, false)
1009 && want_inline_small_function_p (edge, false))
1010 update_edge_key (heap, edge);
1011 else if (edge->aux)
1013 report_inline_failed_reason (edge);
1014 fibheap_delete_node (heap, (fibnode_t) edge->aux);
1015 edge->aux = NULL;
1018 else if (edge->aux)
1019 update_edge_key (heap, edge);
1023 /* Recompute HEAP nodes for each uninlined call in NODE.
1024 This is used when we know that edge badnesses are only going to increase
1025 (we introduced a new call site) and thus all we need is to insert the newly
1026 created edges into the heap. */
1028 static void
1029 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
1030 bitmap updated_nodes)
1032 struct cgraph_edge *e = node->callees;
1034 if (!e)
1035 return;
1036 while (true)
1037 if (!e->inline_failed && e->callee->callees)
1038 e = e->callee->callees;
1039 else
1041 enum availability avail;
1042 struct cgraph_node *callee;
1043 /* We do not reset the callee growth cache here. Since we added a new call,
1044 growth could only have increased and consequently the badness metric
1045 doesn't need updating. */
1046 if (e->inline_failed
1047 && (callee = cgraph_function_or_thunk_node (e->callee, &avail))
1048 && inline_summary (callee)->inlinable
1049 && cgraph_function_body_availability (callee) >= AVAIL_AVAILABLE
1050 && !bitmap_bit_p (updated_nodes, callee->uid))
1052 if (can_inline_edge_p (e, false)
1053 && want_inline_small_function_p (e, false))
1054 update_edge_key (heap, e);
1055 else if (e->aux)
1057 report_inline_failed_reason (e);
1058 fibheap_delete_node (heap, (fibnode_t) e->aux);
1059 e->aux = NULL;
1062 if (e->next_callee)
1063 e = e->next_callee;
1064 else
1068 if (e->caller == node)
1069 return;
1070 e = e->caller->callers;
1072 while (!e->next_callee);
1073 e = e->next_callee;
1078 /* Recompute heap nodes for each of caller edges of each of callees.
1079 Walk recursively into all inline clones. */
1081 static void
1082 update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
1083 bitmap updated_nodes)
1085 struct cgraph_edge *e = node->callees;
1086 if (!e)
1087 return;
1088 while (true)
1089 if (!e->inline_failed && e->callee->callees)
1090 e = e->callee->callees;
1091 else
1093 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
1094 NULL);
1096 /* We inlined, and thus callees might have a different number of calls.
1097 Reset their caches. */
1098 reset_node_growth_cache (callee);
1099 if (e->inline_failed)
1100 update_caller_keys (heap, callee, updated_nodes, e);
1101 if (e->next_callee)
1102 e = e->next_callee;
1103 else
1107 if (e->caller == node)
1108 return;
1109 e = e->caller->callers;
1111 while (!e->next_callee);
1112 e = e->next_callee;
1117 /* Enqueue all recursive calls from NODE into the priority queue, keyed by
1118 how much we want to recursively inline the call. */
1120 static void
1121 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1122 fibheap_t heap)
1124 struct cgraph_edge *e;
1125 enum availability avail;
1127 for (e = where->callees; e; e = e->next_callee)
1128 if (e->callee == node
1129 || (cgraph_function_or_thunk_node (e->callee, &avail) == node
1130 && avail > AVAIL_OVERWRITABLE))
1132 /* When profile feedback is available, prioritize by expected number
1133 of calls. */
1134 fibheap_insert (heap,
1135 !max_count ? -e->frequency
1136 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
1139 for (e = where->callees; e; e = e->next_callee)
1140 if (!e->inline_failed)
1141 lookup_recursive_calls (node, e->callee, heap);
1144 /* Decide on recursive inlining: in case the function has recursive calls,
1145 inline until the body size reaches the recursive inlining limit. If any new indirect edges
1146 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1147 is NULL. */
1149 static bool
1150 recursive_inlining (struct cgraph_edge *edge,
1151 VEC (cgraph_edge_p, heap) **new_edges)
1153 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1154 fibheap_t heap;
1155 struct cgraph_node *node;
1156 struct cgraph_edge *e;
1157 struct cgraph_node *master_clone = NULL, *next;
1158 int depth = 0;
1159 int n = 0;
1161 node = edge->caller;
1162 if (node->global.inlined_to)
1163 node = node->global.inlined_to;
1165 if (DECL_DECLARED_INLINE_P (node->decl))
1166 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1168 /* Make sure that function is small enough to be considered for inlining. */
1169 if (estimate_size_after_inlining (node, edge) >= limit)
1170 return false;
1171 heap = fibheap_new ();
1172 lookup_recursive_calls (node, node, heap);
1173 if (fibheap_empty (heap))
1175 fibheap_delete (heap);
1176 return false;
1179 if (dump_file)
1180 fprintf (dump_file,
1181 " Performing recursive inlining on %s\n",
1182 cgraph_node_name (node));
1184 /* Do the inlining and update list of recursive call during process. */
1185 while (!fibheap_empty (heap))
1187 struct cgraph_edge *curr
1188 = (struct cgraph_edge *) fibheap_extract_min (heap);
1189 struct cgraph_node *cnode;
1191 if (estimate_size_after_inlining (node, curr) > limit)
1192 break;
1194 if (!can_inline_edge_p (curr, true))
1195 continue;
1197 depth = 1;
1198 for (cnode = curr->caller;
1199 cnode->global.inlined_to; cnode = cnode->callers->caller)
1200 if (node->decl == curr->callee->decl)
1201 depth++;
1203 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1204 continue;
1206 if (dump_file)
1208 fprintf (dump_file,
1209 " Inlining call of depth %i", depth);
1210 if (node->count)
1212 fprintf (dump_file, " called approx. %.2f times per call",
1213 (double)curr->count / node->count);
1215 fprintf (dump_file, "\n");
1217 if (!master_clone)
1219 /* We need original clone to copy around. */
1220 master_clone = cgraph_clone_node (node, node->decl,
1221 node->count, CGRAPH_FREQ_BASE,
1222 false, NULL, true);
1223 for (e = master_clone->callees; e; e = e->next_callee)
1224 if (!e->inline_failed)
1225 clone_inlined_nodes (e, true, false, NULL);
1228 cgraph_redirect_edge_callee (curr, master_clone);
1229 inline_call (curr, false, new_edges, &overall_size);
1230 lookup_recursive_calls (node, curr->callee, heap);
1231 n++;
1234 if (!fibheap_empty (heap) && dump_file)
1235 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1236 fibheap_delete (heap);
1238 if (!master_clone)
1239 return false;
1241 if (dump_file)
1242 fprintf (dump_file,
1243 "\n Inlined %i times, "
1244 "body grown from size %i to %i, time %i to %i\n", n,
1245 inline_summary (master_clone)->size, inline_summary (node)->size,
1246 inline_summary (master_clone)->time, inline_summary (node)->time);
1248 /* Remove the master clone we used for inlining. We rely on the fact that
1249 clones inlined into the master clone get queued just before the master
1250 clone, so we don't need recursion. */
1251 for (node = cgraph_nodes; node != master_clone;
1252 node = next)
1254 next = node->next;
1255 if (node->global.inlined_to == master_clone)
1256 cgraph_remove_node (node);
1258 cgraph_remove_node (master_clone);
1259 return true;
1263 /* Given the whole-compilation-unit size estimate INSNS, compute how large we
1264 can allow the unit to grow. */
1266 static int
1267 compute_max_insns (int insns)
1269 int max_insns = insns;
1270 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1271 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
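  /* Illustrative example, assuming the default --param inline-unit-growth=30:
     an initial unit size of 10000 instructions allows the unit to grow to
     10000 * (100 + 30) / 100 = 13000 instructions.  */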
1273 return ((HOST_WIDEST_INT) max_insns
1274 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
1278 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1280 static void
1281 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
1283 while (VEC_length (cgraph_edge_p, new_edges) > 0)
1285 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
1287 gcc_assert (!edge->aux);
1288 if (edge->inline_failed
1289 && can_inline_edge_p (edge, true)
1290 && want_inline_small_function_p (edge, true))
1291 edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
1296 /* We use a greedy algorithm for inlining small functions:
1297 all inline candidates are put into a prioritized heap ordered by
1298 increasing badness.
1300 The inlining of small functions is bounded by the unit growth parameters. */
1302 static void
1303 inline_small_functions (void)
1305 struct cgraph_node *node;
1306 struct cgraph_edge *edge;
1307 fibheap_t heap = fibheap_new ();
1308 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1309 int min_size, max_size;
1310 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
1311 int initial_size = 0;
1313 if (flag_indirect_inlining)
1314 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
1316 if (dump_file)
1317 fprintf (dump_file,
1318 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1319 initial_size);
1321 /* Compute overall unit size and other global parameters used by badness
1322 metrics. */
1324 max_count = 0;
1325 initialize_growth_caches ();
1327 FOR_EACH_DEFINED_FUNCTION (node)
1328 if (!node->global.inlined_to)
1330 if (cgraph_function_with_gimple_body_p (node)
1331 || node->thunk.thunk_p)
1333 struct inline_summary *info = inline_summary (node);
1335 if (!DECL_EXTERNAL (node->decl))
1336 initial_size += info->size;
1339 for (edge = node->callers; edge; edge = edge->next_caller)
1340 if (max_count < edge->count)
1341 max_count = edge->count;
1344 overall_size = initial_size;
1345 max_size = compute_max_insns (overall_size);
1346 min_size = overall_size;
1348 /* Populate the heap with all edges we might inline. */
1350 FOR_EACH_DEFINED_FUNCTION (node)
1351 if (!node->global.inlined_to)
1353 if (dump_file)
1354 fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
1355 cgraph_node_name (node), node->uid);
1357 for (edge = node->callers; edge; edge = edge->next_caller)
1358 if (edge->inline_failed
1359 && can_inline_edge_p (edge, true)
1360 && want_inline_small_function_p (edge, true)
1361 && edge->inline_failed)
1363 gcc_assert (!edge->aux);
1364 update_edge_key (heap, edge);
1368 gcc_assert (in_lto_p
1369 || !max_count
1370 || (profile_info && flag_branch_probabilities));
1372 while (!fibheap_empty (heap))
1374 int old_size = overall_size;
1375 struct cgraph_node *where, *callee;
1376 int badness = fibheap_min_key (heap);
1377 int current_badness;
1378 int growth;
1380 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1381 gcc_assert (edge->aux);
1382 edge->aux = NULL;
1383 if (!edge->inline_failed)
1384 continue;
1386 /* Be sure that the caches are kept consistent. */
1387 #ifdef ENABLE_CHECKING
1388 reset_edge_growth_cache (edge);
1389 reset_node_growth_cache (edge->callee);
1390 #endif
1392 /* When updating the edge costs, we only decrease badness in the keys.
1393 Increases of badness are handled lazily; when we see a key with an
1394 out-of-date value on it, we re-insert it now. */
1395 current_badness = edge_badness (edge, false);
1396 gcc_assert (current_badness >= badness);
1397 if (current_badness != badness)
1399 edge->aux = fibheap_insert (heap, current_badness, edge);
1400 continue;
1403 if (!can_inline_edge_p (edge, true))
1404 continue;
1406 callee = cgraph_function_or_thunk_node (edge->callee, NULL);
1407 growth = estimate_edge_growth (edge);
1408 if (dump_file)
1410 fprintf (dump_file,
1411 "\nConsidering %s with %i size\n",
1412 cgraph_node_name (callee),
1413 inline_summary (callee)->size);
1414 fprintf (dump_file,
1415 " to be inlined into %s in %s:%i\n"
1416 " Estimated growth after inlined into all is %+i insns.\n"
1417 " Estimated badness is %i, frequency %.2f.\n",
1418 cgraph_node_name (edge->caller),
1419 flag_wpa ? "unknown"
1420 : gimple_filename ((const_gimple) edge->call_stmt),
1421 flag_wpa ? -1
1422 : gimple_lineno ((const_gimple) edge->call_stmt),
1423 estimate_growth (callee),
1424 badness,
1425 edge->frequency / (double)CGRAPH_FREQ_BASE);
1426 if (edge->count)
1427 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
1428 edge->count);
1429 if (dump_flags & TDF_DETAILS)
1430 edge_badness (edge, true);
1433 if (overall_size + growth > max_size
1434 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1436 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1437 report_inline_failed_reason (edge);
1438 continue;
1441 if (!want_inline_small_function_p (edge, true))
1442 continue;
1444 /* The heuristics for inlining small functions work poorly for
1445 recursive calls, where we get an effect similar to loop unrolling.
1446 When inlining such an edge seems profitable, leave the decision to the
1447 special-purpose recursive inliner. */
1448 if (cgraph_edge_recursive_p (edge))
1450 where = edge->caller;
1451 if (where->global.inlined_to)
1452 where = where->global.inlined_to;
1453 if (!recursive_inlining (edge,
1454 flag_indirect_inlining
1455 ? &new_indirect_edges : NULL))
1457 edge->inline_failed = CIF_RECURSIVE_INLINING;
1458 continue;
1460 reset_edge_caches (where);
1461 /* Recursive inliner inlines all recursive calls of the function
1462 at once. Consequently we need to update all callee keys. */
1463 if (flag_indirect_inlining)
1464 add_new_edges_to_heap (heap, new_indirect_edges);
1465 update_all_callee_keys (heap, where, updated_nodes);
1467 else
1469 struct cgraph_node *outer_node = NULL;
1470 int depth = 0;
1472 /* Consider the case where the self-recursive function A is inlined into B.
1473 This is a desired optimization in some cases, since it leads to an effect
1474 similar to loop peeling and we might completely optimize out the
1475 recursive call. However we must be extra selective. */
1477 where = edge->caller;
1478 while (where->global.inlined_to)
1480 if (where->decl == callee->decl)
1481 outer_node = where, depth++;
1482 where = where->callers->caller;
1484 if (outer_node
1485 && !want_inline_self_recursive_call_p (edge, outer_node,
1486 true, depth))
1488 edge->inline_failed
1489 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
1490 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1491 continue;
1493 else if (depth && dump_file)
1494 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
1496 gcc_checking_assert (!callee->global.inlined_to);
1497 inline_call (edge, true, &new_indirect_edges, &overall_size);
1498 if (flag_indirect_inlining)
1499 add_new_edges_to_heap (heap, new_indirect_edges);
1501 reset_edge_caches (edge->callee);
1502 reset_node_growth_cache (callee);
1504 /* We inlined the last offline copy into the body. This might lead
1505 to callees of the function having fewer call sites and thus they
1506 may need updating. */
1507 if (callee->global.inlined_to)
1508 update_all_callee_keys (heap, callee, updated_nodes);
1509 else
1510 update_callee_keys (heap, edge->callee, updated_nodes);
1512 where = edge->caller;
1513 if (where->global.inlined_to)
1514 where = where->global.inlined_to;
1516 /* Our profitability metric can depend on local properties
1517 such as the number of inlinable calls and the size of the function body.
1518 After inlining these properties might change for the function we
1519 inlined into (since its body size changed) and for the functions
1520 called by the function we inlined (since the number of their inlinable
1521 callers might change). */
1522 update_caller_keys (heap, where, updated_nodes, NULL);
1524 /* We removed one call of the function we just inlined. If the offline
1525 copy is still needed, be sure to update the keys. */
1526 if (callee != where && !callee->global.inlined_to)
1527 update_caller_keys (heap, callee, updated_nodes, NULL);
1528 bitmap_clear (updated_nodes);
1530 if (dump_file)
1532 fprintf (dump_file,
1533 " Inlined into %s which now has time %i and size %i,"
1534 "net change of %+i.\n",
1535 cgraph_node_name (edge->caller),
1536 inline_summary (edge->caller)->time,
1537 inline_summary (edge->caller)->size,
1538 overall_size - old_size);
1540 if (min_size > overall_size)
1542 min_size = overall_size;
1543 max_size = compute_max_insns (min_size);
1545 if (dump_file)
1546 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1550 free_growth_caches ();
1551 if (new_indirect_edges)
1552 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1553 fibheap_delete (heap);
1554 if (dump_file)
1555 fprintf (dump_file,
1556 "Unit growth for small function inlining: %i->%i (%i%%)\n",
1557 initial_size, overall_size,
1558 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
1559 BITMAP_FREE (updated_nodes);
1562 /* Flatten NODE. Performed both during early inlining and
1563 at IPA inlining time. */
1565 static void
1566 flatten_function (struct cgraph_node *node, bool early)
1568 struct cgraph_edge *e;
1570 /* We shouldn't be called recursively when we are being processed. */
1571 gcc_assert (node->aux == NULL);
1573 node->aux = (void *) node;
1575 for (e = node->callees; e; e = e->next_callee)
1577 struct cgraph_node *orig_callee;
1578 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
1580 /* We've hit cycle? It is time to give up. */
1581 if (callee->aux)
1583 if (dump_file)
1584 fprintf (dump_file,
1585 "Not inlining %s into %s to avoid cycle.\n",
1586 cgraph_node_name (callee),
1587 cgraph_node_name (e->caller));
1588 e->inline_failed = CIF_RECURSIVE_INLINING;
1589 continue;
1592 /* When the edge is already inlined, we just need to recurse into
1593 it in order to fully flatten the leaves. */
1594 if (!e->inline_failed)
1596 flatten_function (callee, early);
1597 continue;
1600 /* The flatten attribute needs to be processed during late inlining. For
1601 extra code quality, however, we do flattening during early
1602 optimization too. */
1603 if (!early
1604 ? !can_inline_edge_p (e, true)
1605 : !can_early_inline_edge_p (e))
1606 continue;
1608 if (cgraph_edge_recursive_p (e))
1610 if (dump_file)
1611 fprintf (dump_file, "Not inlining: recursive call.\n");
1612 continue;
1615 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1616 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
1618 if (dump_file)
1619 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1620 continue;
1623 /* Inline the edge and flatten the inline clone. Avoid
1624 recursing through the original node if the node was cloned. */
1625 if (dump_file)
1626 fprintf (dump_file, " Inlining %s into %s.\n",
1627 cgraph_node_name (callee),
1628 cgraph_node_name (e->caller));
1629 orig_callee = callee;
1630 inline_call (e, true, NULL, NULL);
1631 if (e->callee != orig_callee)
1632 orig_callee->aux = (void *) node;
1633 flatten_function (e->callee, early);
1634 if (e->callee != orig_callee)
1635 orig_callee->aux = NULL;
1638 node->aux = NULL;
1641 /* Decide on the inlining. We do so in topological order to avoid
1642 the expense of updating data structures. */
1644 static unsigned int
1645 ipa_inline (void)
1647 struct cgraph_node *node;
1648 int nnodes;
1649 struct cgraph_node **order =
1650 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1651 int i;
1653 if (in_lto_p && flag_indirect_inlining)
1654 ipa_update_after_lto_read ();
1655 if (flag_indirect_inlining)
1656 ipa_create_all_structures_for_iinln ();
1658 if (dump_file)
1659 dump_inline_summaries (dump_file);
1661 nnodes = ipa_reverse_postorder (order);
1663 for (node = cgraph_nodes; node; node = node->next)
1664 node->aux = 0;
1666 if (dump_file)
1667 fprintf (dump_file, "\nFlattening functions:\n");
1669 /* In the first pass handle functions to be flattened. Do this with
1670 a priority so none of our later choices will make this impossible. */
1671 for (i = nnodes - 1; i >= 0; i--)
1673 node = order[i];
1675 /* Handle nodes to be flattened.
1676 Ideally when processing callees we stop inlining at the
1677 entry of cycles, possibly cloning that entry point and
1678 trying to flatten it, turning it into a self-recursive
1679 function. */
1680 if (lookup_attribute ("flatten",
1681 DECL_ATTRIBUTES (node->decl)) != NULL)
1683 if (dump_file)
1684 fprintf (dump_file,
1685 "Flattening %s\n", cgraph_node_name (node));
1686 flatten_function (node, false);
1690 inline_small_functions ();
1691 cgraph_remove_unreachable_nodes (true, dump_file);
1692 free (order);
1694 /* We already perform some inlining of functions called once during
1695 inlining small functions above. After unreachable nodes are removed,
1696 we still might do a quick check that nothing new is found. */
1697 if (flag_inline_functions_called_once)
1699 int cold;
1700 if (dump_file)
1701 fprintf (dump_file, "\nDeciding on functions called once:\n");
1703 /* Inlining one function called once has a good chance of preventing
1704 inlining of another function into the same callee. Ideally we should
1705 work in priority order, but probably inlining hot functions first
1706 is a good cut without the extra pain of maintaining the queue.
1708 ??? This does not really fit the bill perfectly: inlining a function
1709 into a callee often leads to better optimization of the callee due to
1710 increased context for optimization.
1711 For example, if the main() function calls a function that outputs help
1712 and then a function that does the main optimization, we should inline
1713 the second with priority even if both calls are cold by themselves.
1715 We probably want to implement a new predicate replacing our use of
1716 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
1717 to be hot. */
1718 for (cold = 0; cold <= 1; cold ++)
1720 for (node = cgraph_nodes; node; node = node->next)
1722 if (want_inline_function_called_once_p (node)
1723 && (cold
1724 || cgraph_maybe_hot_edge_p (node->callers)))
1726 struct cgraph_node *caller = node->callers->caller;
1728 if (dump_file)
1730 fprintf (dump_file,
1731 "\nInlining %s size %i.\n",
1732 cgraph_node_name (node), inline_summary (node)->size);
1733 fprintf (dump_file,
1734 " Called once from %s %i insns.\n",
1735 cgraph_node_name (node->callers->caller),
1736 inline_summary (node->callers->caller)->size);
1739 inline_call (node->callers, true, NULL, NULL);
1740 if (dump_file)
1741 fprintf (dump_file,
1742 " Inlined into %s which now has %i size\n",
1743 cgraph_node_name (caller),
1744 inline_summary (caller)->size);
1750 /* Free ipa-prop structures if they are no longer needed. */
1751 if (flag_indirect_inlining)
1752 ipa_free_all_structures_after_iinln ();
1754 if (dump_file)
1755 fprintf (dump_file,
1756 "\nInlined %i calls, eliminated %i functions\n\n",
1757 ncalls_inlined, nfunctions_inlined);
1759 if (dump_file)
1760 dump_inline_summaries (dump_file);
1761 /* In WPA we use inline summaries for partitioning process. */
1762 if (!flag_wpa)
1763 inline_free_summary ();
1764 return 0;
1767 /* Inline always-inline function calls in NODE. */
1769 static bool
1770 inline_always_inline_functions (struct cgraph_node *node)
1772 struct cgraph_edge *e;
1773 bool inlined = false;
1775 for (e = node->callees; e; e = e->next_callee)
1777 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
1778 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1779 continue;
1781 if (cgraph_edge_recursive_p (e))
1783 if (dump_file)
1784 fprintf (dump_file, " Not inlining recursive call to %s.\n",
1785 cgraph_node_name (e->callee));
1786 e->inline_failed = CIF_RECURSIVE_INLINING;
1787 continue;
1790 if (!can_early_inline_edge_p (e))
1791 continue;
1793 if (dump_file)
1794 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
1795 cgraph_node_name (e->callee),
1796 cgraph_node_name (e->caller));
1797 inline_call (e, true, NULL, NULL);
1798 inlined = true;
1801 return inlined;
1804 /* Inline calls to small functions in NODE during early inlining.
1805 Return true if anything was inlined. */
1807 static bool
1808 early_inline_small_functions (struct cgraph_node *node)
1810 struct cgraph_edge *e;
1811 bool inlined = false;
1813 for (e = node->callees; e; e = e->next_callee)
1815 struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
1816 if (!inline_summary (callee)->inlinable
1817 || !e->inline_failed)
1818 continue;
1820 /* Do not consider functions not declared inline. */
1821 if (!DECL_DECLARED_INLINE_P (callee->decl)
1822 && !flag_inline_small_functions
1823 && !flag_inline_functions)
1824 continue;
1826 if (dump_file)
1827 fprintf (dump_file, "Considering inline candidate %s.\n",
1828 cgraph_node_name (callee));
1830 if (!can_early_inline_edge_p (e))
1831 continue;
1833 if (cgraph_edge_recursive_p (e))
1835 if (dump_file)
1836 fprintf (dump_file, " Not inlining: recursive call.\n");
1837 continue;
1840 if (!want_early_inline_function_p (e))
1841 continue;
1843 if (dump_file)
1844 fprintf (dump_file, " Inlining %s into %s.\n",
1845 cgraph_node_name (callee),
1846 cgraph_node_name (e->caller));
1847 inline_call (e, true, NULL, NULL);
1848 inlined = true;
1851 return inlined;
1854 /* Do inlining of small functions. Doing so early helps profiling and other
1855 passes to be somewhat more effective and avoids some code duplication in
1856 later real inlining pass for testcases with very many function calls. */
1857 static unsigned int
1858 early_inliner (void)
1860 struct cgraph_node *node = cgraph_get_node (current_function_decl);
1861 struct cgraph_edge *edge;
1862 unsigned int todo = 0;
1863 int iterations = 0;
1864 bool inlined = false;
1866 if (seen_error ())
1867 return 0;
1869 /* Do nothing if the data structures for the ipa-inliner are already computed.
1870 This happens when some pass decides to construct a new function and
1871 cgraph_add_new_function calls lowering passes and early optimization on
1872 it. This may confuse us when the early inliner decides to inline a call to
1873 a function clone, because function clones don't have a parameter list in
1874 ipa-prop matching their signature. */
1875 if (ipa_node_params_vector)
1876 return 0;
1878 #ifdef ENABLE_CHECKING
1879 verify_cgraph_node (node);
1880 #endif
1882 /* Even when not optimizing or not inlining, inline always-inline
1883 functions. */
1884 inlined = inline_always_inline_functions (node);
1886 if (!optimize
1887 || flag_no_inline
1888 || !flag_early_inlining
1889 /* Never inline regular functions into always-inline functions
1890 during incremental inlining. This sucks, as functions calling
1891 always-inline functions will get less optimized, but at the
1892 same time inlining functions that call an always-inline
1893 function into an always-inline function might introduce
1894 cycles of edges to be always inlined in the callgraph.
1896 We might want to be smarter and just avoid this type of inlining. */
1897 || DECL_DISREGARD_INLINE_LIMITS (node->decl))
1899 else if (lookup_attribute ("flatten",
1900 DECL_ATTRIBUTES (node->decl)) != NULL)
1902 /* When the function is marked to be flattened, recursively inline
1903 all calls in it. */
1904 if (dump_file)
1905 fprintf (dump_file,
1906 "Flattening %s\n", cgraph_node_name (node));
1907 flatten_function (node, true);
1908 inlined = true;
1910 else
1912 /* We iterate incremental inlining to get trivial cases of indirect
1913 inlining. */
1914 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1915 && early_inline_small_functions (node))
1917 timevar_push (TV_INTEGRATION);
1918 todo |= optimize_inline_calls (current_function_decl);
1920 /* Technically we ought to recompute the inline parameters so the new
1921 iteration of the early inliner works as expected. We however have
1922 approximately the right values and thus we only need to update the edge
1923 info that might be cleared out for newly discovered edges. */
1924 for (edge = node->callees; edge; edge = edge->next_callee)
1926 struct inline_edge_summary *es = inline_edge_summary (edge);
1927 es->call_stmt_size
1928 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
1929 es->call_stmt_time
1930 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
1932 timevar_pop (TV_INTEGRATION);
1933 iterations++;
1934 inlined = false;
1936 if (dump_file)
1937 fprintf (dump_file, "Iterations: %i\n", iterations);
1940 if (inlined)
1942 timevar_push (TV_INTEGRATION);
1943 todo |= optimize_inline_calls (current_function_decl);
1944 timevar_pop (TV_INTEGRATION);
1947 cfun->always_inline_functions_inlined = true;
1949 return todo;
1952 struct gimple_opt_pass pass_early_inline =
1955 GIMPLE_PASS,
1956 "einline", /* name */
1957 NULL, /* gate */
1958 early_inliner, /* execute */
1959 NULL, /* sub */
1960 NULL, /* next */
1961 0, /* static_pass_number */
1962 TV_INLINE_HEURISTICS, /* tv_id */
1963 PROP_ssa, /* properties_required */
1964 0, /* properties_provided */
1965 0, /* properties_destroyed */
1966 0, /* todo_flags_start */
1967 0 /* todo_flags_finish */
1972 /* When to run IPA inlining. Inlining of always-inline functions
1973 happens during early inlining. */
1975 static bool
1976 gate_ipa_inline (void)
1978 /* ??? We'd like to skip this if not optimizing or not inlining as
1979 all always-inline functions have been processed by early
1980 inlining already. But this at least breaks EH with C++ as
1981 we need to unconditionally run fixup_cfg even at -O0.
1982 So leave it on unconditionally for now. */
1983 return 1;
1986 struct ipa_opt_pass_d pass_ipa_inline =
1989 IPA_PASS,
1990 "inline", /* name */
1991 gate_ipa_inline, /* gate */
1992 ipa_inline, /* execute */
1993 NULL, /* sub */
1994 NULL, /* next */
1995 0, /* static_pass_number */
1996 TV_INLINE_HEURISTICS, /* tv_id */
1997 0, /* properties_required */
1998 0, /* properties_provided */
1999 0, /* properties_destroyed */
2000 TODO_remove_functions, /* todo_flags_start */
2001 TODO_dump_cgraph
2002 | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
2004 inline_generate_summary, /* generate_summary */
2005 inline_write_summary, /* write_summary */
2006 inline_read_summary, /* read_summary */
2007 NULL, /* write_optimization_summary */
2008 NULL, /* read_optimization_summary */
2009 NULL, /* stmt_fixup */
2010 0, /* TODOs */
2011 inline_transform, /* function_transform */
2012 NULL, /* variable_transform */