/* Inlining decision heuristics.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/*  Inlining decision heuristics

    We separate inlining decisions from the inliner itself and store them
    in the callgraph as a so-called inline plan.  Refer to the cgraph.c
    documentation for the particular representation of inline plans in
    the callgraph.

    There are three major parts of this file:

    cgraph_mark_inline implementation

      This function marks a given call inline and performs the necessary
      modifications of the cgraph (production of the clones and updating
      of overall statistics).

    inlining heuristics limits

      These functions check that a particular inlining is allowed by the
      limits specified by the user (allowed function growth, overall unit
      growth and so on).

    inlining heuristics

      This is the implementation of the IPA pass aiming to get as much
      benefit from inlining as possible while obeying the limits checked
      above.

      The implementation of particular heuristics is separated from the
      rest of the code to make it easier to replace it with a more
      complicated implementation in the future.  The rest of the inlining
      code acts as a library aimed at modifying the callgraph and
      verifying that the limits on code size growth are respected.

      To mark a given call inline, use the cgraph_mark_inline function;
      the verification is performed by cgraph_default_inline_p and
      cgraph_check_inline_limits.

      The heuristics implement a simple knapsack-style algorithm ordering
      all functions by their "profitability" (estimated by code size
      growth) and inlining them in priority order.

      cgraph_decide_inlining implements the heuristics taking the whole
      callgraph into account, while cgraph_decide_inlining_incrementally
      considers only one function at a time and is used in
      non-unit-at-a-time mode.  */
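/* Informal sketch of the greedy small-function pass (see
   cgraph_decide_inlining_of_small_functions for the real loop; this is
   an illustration, not the exact control flow):

      put each inlinable call edge into a fibheap keyed by its badness;
      while unit size is within the --param inline-unit-growth budget
        and the heap is non-empty:
          extract the edge with the smallest badness;
          if it passes the limit checks, inline it;
          recompute keys of edges whose badness may have changed.  */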
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
/* Statistics we collect about the inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int initial_insns;
static int overall_insns;
static int max_insns;
static gcov_type max_count;
/* Estimate size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size;
  tree fndecl = what->decl, arg;
  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);

  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
    call_insns += estimate_move_cost (TREE_TYPE (arg));
  size = (what->global.insns - call_insns) * times + to->global.insns;
  gcc_assert (size >= 0);
  return size;
}
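/* Worked example with made-up numbers: inlining a 30-insn callee that
   takes two arguments whose moves cost 1 insn each, with an
   inline-call-cost of 16, gives CALL_INSNS = 16 + 2 = 18; a single copy
   inlined into a 100-insn caller is then estimated at
   (30 - 18) * 1 + 100 = 112 insns.  */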
/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using the node originally representing the
   out-of-line function call.  */

void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
{
  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && !e->callee->needed
          && flag_unit_at_a_time)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (DECL_SAVED_TREE (e->callee->decl))
            overall_insns -= e->callee->global.insns, nfunctions_inlined++;
          duplicate = false;
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->count, e->loop_nest,
                                 update_original);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}
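/* Note on the single-caller case above: when the callee has exactly one
   caller and is not otherwise needed, no clone is made at all; the
   out-of-line body itself is re-used as the inline copy and its insns
   are subtracted from the unit total.  */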
/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  */

void
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
{
  int old_insns = 0, new_insns = 0;
  struct cgraph_node *to = NULL, *what;

  if (e->callee->inline_decl)
    cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));

  gcc_assert (e->inline_failed);
  e->inline_failed = NULL;

  if (!e->callee->global.inlined && flag_unit_at_a_time)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  /* Now update the size of the caller and of all functions the caller
     is inlined into.  */
  for (; e && !e->inline_failed; e = e->caller->callers)
    {
      old_insns = e->caller->global.insns;
      new_insns = cgraph_estimate_size_after_inlining (1, e->caller, what);
      gcc_assert (new_insns >= 0);
      to = e->caller;
      to->global.insns = new_insns;
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_insns > old_insns)
    overall_insns += new_insns - old_insns;
  ncalls_inlined++;
}
/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */

static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;
  int times = 0;

  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true);
          if (e == edge)
            edge = next;
          times++;
        }
    }
  gcc_assert (times);
  return edge;
}
/* Estimate the growth caused by inlining NODE into all its callers.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                 - e->caller->global.insns);

  /* ??? Wrong for self-recursive functions or cases where we decide not
     to inline for other reasons, but it is not a big deal: in that case
     we will keep the body around, but we will also avoid some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl))
    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
}
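/* Worked example with made-up sizes: a 20-insn static function with two
   call sites, each call costing 4 insns, grows each caller by
   20 - 4 = 16 insns; since the out-of-line body then goes away, the net
   growth is 2 * 16 - 20 = 12 insns.  */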
/* Return false when inlining WHAT into TO is not a good idea
   as it would cause too large a growth of function bodies.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            const char **reason)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  for (e = to->callees; e; e = e->next_callee)
    if (e->callee == what)
      times++;

  /* When inlining a large function body called once into a small function,
     take the inlined function as the base for limiting the growth.  */
  if (to->local.self_insns > what->local.self_insns)
    limit = to->local.self_insns;
  else
    limit = what->local.self_insns;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = N_("--param large-function-growth limit reached");
      return false;
    }
  return true;
}
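/* Illustrative arithmetic: with --param large-function-growth=100 and a
   larger-of-the-two self size of 256 insns, the limit is
   256 + 256 * 100 / 100 = 512 insns; inlining is rejected only when the
   estimated new size exceeds both this limit and --param
   large-function-insns.  (Hypothetical numbers.)  */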
/* Return true when function N is small enough to be inlined.  */

bool
cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
{
  tree decl = n->decl;

  if (n->inline_decl)
    decl = n->inline_decl;
  if (!DECL_INLINE (decl))
    {
      if (reason)
        *reason = N_("function not inlinable");
      return false;
    }

  if (!DECL_STRUCT_FUNCTION (decl)->cfg)
    {
      if (reason)
        *reason = N_("function body not available");
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-single limit reached");
          return false;
        }
    }
  else
    {
      if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-auto limit reached");
          return false;
        }
    }

  return true;
}
/* Return true when inlining WHAT would create recursive inlining.
   We call it recursive inlining whenever the same function appears more
   than once on a single recursion nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             const char **reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn about it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? N_("recursive inlining") : "");
  return recursive;
}
/* Return true if the call can be hot.  */
static bool
cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
{
  if (profile_info && flag_branch_probabilities
      && (edge->count
          <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
    return false;
  return true;
}
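/* For example, with hot-bb-count-fraction set to 10000 and a profile
   whose maximal counter (sum_max) is 1000000, edges executed at most
   1000000 / 10000 = 100 times are considered cold.  (Hypothetical
   numbers; the fraction is a user-tunable --param.)  */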
/* A cost model driving the inlining heuristics so that the edges with the
   smallest badness are inlined first.  After each inlining the costs of
   all caller edges of the affected nodes are recomputed, so the metric
   may accurately depend on values such as the number of inlinable callers
   of a function or its body size.

   With profiling we use the number of executions of each edge to drive
   the cost.  We should also distinguish hot and cold calls, where cold
   calls are inlined only when the overall code size improves.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  if (max_count)
    {
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      growth -= edge->caller->global.insns;

      /* Always prefer inlining saving code size.  */
      if (growth <= 0)
        return INT_MIN - growth;
      return ((int) ((double) edge->count * INT_MIN / max_count)) / growth;
    }
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      int badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if the call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;

      /* Make recursive inlining always happen after other inlining is
         done.  */
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
        return badness + 1;
      else
        return badness;
    }
}
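/* Worked example for the non-profile branch, with made-up numbers: a
   callee whose estimated unit growth is 40 insns gets badness
   40 * 256 = 10240; if the call sits in a loop nest of depth 2 the value
   is shifted right twice, giving 2560, so more deeply nested calls are
   inlined earlier.  */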
/* Recompute heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        int badness = cgraph_edge_badness (edge);
        if (edge->aux)
          {
            fibnode_t n = edge->aux;
            gcc_assert (n->data == edge);
            if (n->key == badness)
              continue;

            /* fibheap_replace_key can only decrease keys; when it fails,
               delete the node and re-insert it with the new key.  */
            if (fibheap_replace_key (heap, n, badness))
              continue;
            fibheap_delete_node (heap, edge->aux);
          }
        edge->aux = fibheap_insert (heap, badness, edge);
      }
}
/* Recompute heap nodes for the caller edges of each callee of NODE.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else
      update_callee_keys (heap, e->callee, updated_nodes);
}
/* Enqueue all recursive calls from NODE into the priority queue HEAP,
   prioritized by how much we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by the expected
           number of calls.  Without profile feedback we maintain a simple
           queue ordering candidates by recursion depth.  */
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1 << 24) - 1)
                                        / (1 << 24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
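/* The divisor (max_count + (1 << 24) - 1) / (1 << 24) above rescales
   profile counts into roughly 24 bits so that the negated value fits
   comfortably in a fibheap key; e.g. for a hypothetical max_count of
   2^32 every count is divided by about 2^8 before negation.  */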
/* Find callgraph nodes closing a circle in the graph.  The resulting
   hashtab can be used to avoid walking the circles.  Uses the cgraph
   nodes' ->aux field, which needs to be zero before and will be zero
   after the operation.  */

static void
cgraph_find_cycles (struct cgraph_node *node, htab_t cycles)
{
  struct cgraph_edge *e;

  if (node->aux)
    {
      void **slot;
      slot = htab_find_slot (cycles, node, INSERT);
      if (!*slot)
        {
          if (dump_file)
            fprintf (dump_file, "Cycle contains %s\n",
                     cgraph_node_name (node));
          *slot = node;
        }
      return;
    }

  node->aux = node;
  for (e = node->callees; e; e = e->next_callee)
    cgraph_find_cycles (e->callee, cycles);
  node->aux = 0;
}
/* Leafify the cgraph node, i.e. flatten its callees into it.  We have to
   be careful when recursing so as not to run endlessly in circles of the
   callgraph.  We do so by using the hashtab of cycle-entering nodes
   generated by cgraph_find_cycles.  */

static void
cgraph_flatten_node (struct cgraph_node *node, htab_t cycles)
{
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    {
      /* Inline the call, if possible, and recurse.  Be sure we are not
         entering callgraph circles here.  */
      if (e->inline_failed
          && e->callee->local.inlinable
          && !cgraph_recursive_inlining_p (node, e->callee,
                                           &e->inline_failed)
          && !htab_find (cycles, e->callee))
        {
          if (dump_file)
            fprintf (dump_file, " inlining %s", cgraph_node_name (e->callee));
          cgraph_mark_inline_edge (e, true);
          cgraph_flatten_node (e->callee, cycles);
        }
      else if (dump_file)
        fprintf (dump_file, " !inlining %s", cgraph_node_name (e->callee));
    }
}
/* Decide on recursive inlining: in the case a function has recursive
   calls, inline until the body size reaches the given limit.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone;
  int depth = 0;
  int n = 0;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             "  Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need the original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, 1, false);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr = fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     "   maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, "   Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         "   Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double) curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n   Inlined %i times, body grown from %i to %i insns\n", n,
             master_clone->global.insns, node->global.insns);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = node->next)
    if (node->global.inlined_to == master_clone)
      cgraph_remove_node (node);
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls of
     the function.  At this point we should probably walk the function
     and inline clones and compensate the counts accordingly.  This
     probably doesn't matter much in practice.  */
  return n > 0;
}
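/* Example of the probability cut-off above, with hypothetical counts: if
   the function is entered 1000 times and a recursive edge executes 50
   times, the edge scores 50 * 100 / 1000 = 5 and is skipped whenever
   min-inline-recursive-probability is above 5.  */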
/* Set inline_failed for all callers of the given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n", reason);
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}
/* We use a greedy algorithm for inlining small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and the
   estimates are updated as edges are inlined.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  const char *failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
          || node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n",
                 cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge),
                                        edge);
          }
    }
  while (overall_insns <= max_insns && (edge = fibheap_extract_min (heap)))
    {
      int old_insns = overall_insns;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

      growth -= edge->caller->global.insns;

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i insns\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.insns);
          fprintf (dump_file,
                   " to be inlined into %s\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i.\n",
                   cgraph_node_name (edge->caller),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge));
          if (edge->count)
            fprintf (dump_file, " Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
                     edge->count);
        }
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* Without profile information we do not weight in any way the
         position of the call in the procedure itself.  This means that
         if a call of function A from function B seems profitable to
         inline, the recursive call of A in the inline copy of A in B
         will look profitable too, and we end up inlining until reaching
         the maximal function growth.  This is not a good idea, so
         prohibit the recursive inlining.

         ??? When frequencies are taken into account we might not need
         this restriction.  */
      if (!max_count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits
                   ? N_("recursive inlining") : "");
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (!cgraph_maybe_hot_edge_p (edge) && growth > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed =
                N_("call is unlikely");
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         edge->inline_failed);
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         edge->inline_failed);
            }
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where))
            continue;
          update_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (!cgraph_check_inline_limits (edge->caller, edge->callee,
                                           &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller),
                         edge->inline_failed);
              continue;
            }
          callee = edge->callee;
          cgraph_mark_inline_edge (edge, true);
          update_callee_keys (heap, callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties such as
         the number of inlinable calls and the size of the function body.
         After inlining these properties might change for the function we
         inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has %i insns, "
                   "net change of %+i insns.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.insns,
                   overall_insns - old_insns);
        }
    }
  while ((edge = fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = N_("--param inline-unit-growth limit reached");
    }
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}
/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating the data structures.  */

static void
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_insns = 0;
  int i;

  timevar_push (TV_INLINE_HEURISTICS);
  max_count = 0;
  for (node = cgraph_nodes; node; node = node->next)
    {
      struct cgraph_edge *e;
      initial_insns += node->local.self_insns;
      for (e = node->callees; e; e = e->next_callee)
        if (max_count < e->count)
          max_count = e->count;
    }
  overall_insns = initial_insns;
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));

  max_insns = overall_insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  max_insns = ((HOST_WIDEST_INT) max_insns
               * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
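  /* For instance (hypothetical numbers): with --param inline-unit-growth=30,
     a unit of 10000 insns may grow to at most
     10000 * (100 + 30) / 100 = 13000 insns before inlining of small
     functions stops.  */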
  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with %i insns.\n",
             initial_insns);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a priority
     so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      struct cgraph_edge *e, *next;

      node = order[i];

      /* Handle nodes to be flattened, but don't update overall unit
         size.  */
      if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          int old_overall_insns = overall_insns;
          htab_t cycles;
          if (dump_file)
            fprintf (dump_file,
                     "Leafifying %s\n", cgraph_node_name (node));
          cycles = htab_create (7, htab_hash_pointer, htab_eq_pointer, NULL);
          cgraph_find_cycles (node, cycles);
          cgraph_flatten_node (node, cycles);
          htab_delete (cycles);
          overall_insns = old_overall_insns;
          /* We don't need to consider always_inline functions inside the
             flattened function anymore.  */
          continue;
        }

      if (!node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file,
                 "\nConsidering %s %i insns (always inline)\n",
                 cgraph_node_name (node), node->global.insns);
      old_insns = overall_insns;
      for (e = node->callers; e; e = next)
        {
          next = e->next_caller;
          if (!e->inline_failed)
            continue;
          if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                           &e->inline_failed))
            continue;
          cgraph_mark_inline_edge (e, true);
          if (dump_file)
            fprintf (dump_file,
                     " Inlined into %s which now has %i insns.\n",
                     cgraph_node_name (e->caller),
                     e->caller->global.insns);
        }
      if (dump_file)
        fprintf (dump_file,
                 " Inlined for a net change of %+i insns.\n",
                 overall_insns - old_insns);
    }

  if (!flag_really_no_inline)
    cgraph_decide_inlining_of_small_functions ();

  if (!flag_really_no_inline
      && flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */

      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers && !node->callers->next_caller && !node->needed
              && node->local.inlinable && node->callers->inline_failed
              && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
            {
              bool ok = true;
              struct cgraph_node *node1;

              /* Verify that we won't duplicate the caller.  */
              for (node1 = node->callers->caller;
                   node1->callers && !node1->callers->inline_failed
                   && ok; node1 = node1->callers->caller)
                if (node1->callers->next_caller || node1->needed)
                  ok = false;
              if (ok)
                {
                  if (dump_file)
                    {
                      fprintf (dump_file,
                               "\nConsidering %s %i insns.\n",
                               cgraph_node_name (node), node->global.insns);
                      fprintf (dump_file,
                               " Called once from %s %i insns.\n",
                               cgraph_node_name (node->callers->caller),
                               node->callers->caller->global.insns);
                    }

                  old_insns = overall_insns;

                  if (cgraph_check_inline_limits (node->callers->caller,
                                                  node, NULL))
                    {
                      cgraph_mark_inline (node->callers);
                      if (dump_file)
                        fprintf (dump_file,
                                 " Inlined into %s which now has %i insns"
                                 " for a net change of %+i insns.\n",
                                 cgraph_node_name (node->callers->caller),
                                 node->callers->caller->global.insns,
                                 overall_insns - old_insns);
                    }
                  else
                    {
                      if (dump_file)
                        fprintf (dump_file,
                                 " Inline limit reached, not inlined.\n");
                    }
                }
            }
        }
    }

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "%i insns turned to %i insns.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_insns,
             overall_insns);
  free (order);
  timevar_pop (TV_INLINE_HEURISTICS);
}
/* Decide on the inlining of calls within a single function NODE.  This
   is used in non-unit-at-a-time mode and by early inlining; EARLY is
   true in the latter case.  */

bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;
  bool inlined = false;
  const char *failed_reason;

  /* First of all look for always-inline functions.  */
  for (e = node->callees; e; e = e->next_callee)
    if (e->callee->local.disregard_inline_limits
        && e->inline_failed
        && !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed)
        /* ??? It is possible that renaming a variable removed the function
           body in duplicate_decls.  See gcc.c-torture/compile/20011119-2.c.  */
        && (DECL_SAVED_TREE (e->callee->decl) || e->callee->inline_decl))
      {
        if (dump_file && early)
          {
            fprintf (dump_file, "  Early inlining %s",
                     cgraph_node_name (e->callee));
            fprintf (dump_file, " into %s\n", cgraph_node_name (node));
          }
        cgraph_mark_inline (e);
        inlined = true;
      }

  /* Now do the automatic inlining.  */
  if (!flag_really_no_inline)
    for (e = node->callees; e; e = e->next_callee)
      if (e->callee->local.inlinable
          && e->inline_failed
          && !e->callee->local.disregard_inline_limits
          && !cgraph_recursive_inlining_p (node, e->callee,
                                           &e->inline_failed)
          && (!early
              || (cgraph_estimate_size_after_inlining (1, e->caller, node)
                  <= e->caller->global.insns))
          && cgraph_check_inline_limits (node, e->callee, &e->inline_failed)
          && (DECL_SAVED_TREE (e->callee->decl) || e->callee->inline_decl))
        {
          if (cgraph_default_inline_p (e->callee, &failed_reason))
            {
              if (dump_file && early)
                {
                  fprintf (dump_file, "  Early inlining %s",
                           cgraph_node_name (e->callee));
                  fprintf (dump_file, " into %s\n", cgraph_node_name (node));
                }
              cgraph_mark_inline (e);
              inlined = true;
            }
          else if (!early)
            e->inline_failed = failed_reason;
        }
  if (early && inlined)
    {
      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
      tree_register_cfg_hooks ();
      current_function_decl = node->decl;
      optimize_inline_calls (current_function_decl);
      node->local.self_insns = node->global.insns;
      current_function_decl = NULL;
      pop_cfun ();
    }
  return inlined;
}
/* Decide when inlining shall be performed.  */
static bool
cgraph_gate_inlining (void)
{
  return flag_inline_trees;
}

struct tree_opt_pass pass_ipa_inline =
{
  "inline",                             /* name */
  cgraph_gate_inlining,                 /* gate */
  cgraph_decide_inlining,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INTEGRATION,                       /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func,    /* todo_flags_finish */
  0                                     /* letter */
};
/* Do inlining of small functions.  Doing so early helps profiling and
   other passes be somewhat more effective and avoids some code
   duplication in the later real inlining pass for testcases with very
   many function calls.  */
static void
cgraph_early_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order;
  int i;

  /* Bail out before allocating anything if errors were already seen.  */
  if (sorrycount || errorcount)
    return;

  order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
#ifdef ENABLE_CHECKING
  for (node = cgraph_nodes; node; node = node->next)
    gcc_assert (!node->aux);
#endif

  nnodes = cgraph_postorder (order);
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];
      if (node->analyzed && node->local.inlinable
          && (node->needed || node->reachable)
          && node->callers)
        cgraph_decide_inlining_incrementally (node, true);
    }
  cgraph_remove_unreachable_nodes (true, dump_file);
#ifdef ENABLE_CHECKING
  for (node = cgraph_nodes; node; node = node->next)
    gcc_assert (!node->global.inlined_to);
#endif
  free (order);
}
/* Decide when early inlining shall be performed.  */
static bool
cgraph_gate_early_inlining (void)
{
  return flag_inline_trees && flag_early_inlining;
}

struct tree_opt_pass pass_early_ipa_inline =
{
  "einline",                            /* name */
  cgraph_gate_early_inlining,           /* gate */
  cgraph_early_inlining,                /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INTEGRATION,                       /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func,    /* todo_flags_finish */
  0                                     /* letter */
};