/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation for the particular representation of inline plans in the
   callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks the given call inline and performs the necessary
     modifications of the cgraph (production of the clones and updating of
     overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass aiming to get as much benefit
     from inlining as possible while obeying the limits checked above.

     The implementation of the particular heuristics is separated from the
     rest of the code to make it easier to replace it with a more complicated
     implementation in the future.  The rest of the inlining code acts as a
     library aimed to modify the callgraph and to verify that the parameters
     on code size growth fit.

     To mark a given call inline, use the cgraph_mark_inline function; the
     verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm ordering
     all functions by their "profitability" (estimated by code size growth)
     and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used by the early inliner.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes local properties of functions that are used by the
     inliner: the estimated function body size, whether the function is
     inlinable at all, and stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (ie run as a subpass of some
     earlier IPA pass).  The results are made out of date by any optimization
     applied on the function body.

   pass_early_inlining

     Simple local inlining pass inlining callees into the current function.
     This pass makes no global whole-compilation-unit analysis, and thus, when
     allowed to do inlining that expands code size, it might result in
     unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions already
     converted into SSA form are inlined, so the conversion must happen in
     topological order on the callgraph (that is maintained by the pass
     manager).  The functions after inlining are early optimized, so the early
     inliner sees the unoptimized function itself, but all considered callees
     are already optimized, allowing it to unfold the abstraction penalty on
     C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, the early inlining is also necessary to reduce
     instrumentation costs on programs with a high abstraction penalty (doing
     many redundant calls).  This can't happen in parallel with early
     optimization and profile instrumentation, because we would end up
     re-instrumenting already instrumented function bodies we brought in via
     inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.  It
     is a simple wrapper around pass_early_inlining and ensures the first
     inlining.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm to do
     inlining of small functions that results in overall growth of the
     compilation unit, and inlining of functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph) and, unlike early inlining, it does
     not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline on a
     given function.  Possibly the function body before inlining is saved when
     it is needed for further inlining later.  */

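/* Illustrative note (not part of the original sources): the always_inline and
   flatten behavior described above is driven by source-level attributes on
   the user's functions, which this file observes through
   DECL_DISREGARD_INLINE_LIMITS and lookup_attribute ("flatten", ...).  A
   hypothetical example:

     static inline int add1 (int x) __attribute__ ((always_inline));

     __attribute__ ((flatten))
     void hot_loop (void)
     {
       ...
     }

   add1 is inlined regardless of the size limits checked below, while the
   calls reachable from hot_loop's body are recursively inlined into it
   (INLINE_ALL mode).  */
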
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
#include "ipa-prop.h"

/* Mode the incremental inliner operates in:

   In ALWAYS_INLINE mode only functions marked
   always_inline are inlined.  This mode is used after detecting a cycle
   during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE_NORECURSIVE,
  INLINE_SIZE,
  INLINE_ALL
};

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
                                      int);

/* Statistics we collect about inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_insns;
static gcov_type max_count;

/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;

static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
  return &node->local.inline_summary;
}

/* Estimate size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size;
  tree fndecl = what->decl, arg;
  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);

  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
    call_insns += estimate_move_cost (TREE_TYPE (arg));
  size = (what->global.insns - call_insns) * times + to->global.insns;
  gcc_assert (size >= 0);
  return size;
}

/* E is expected to be an edge being inlined.  Clone destination node of
   the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating new
   clones or re-using node originally representing out-of-line function
   call.  */
void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
{
  HOST_WIDE_INT peak;

  if (duplicate)
    {
      /* We may eliminate the need for out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && !e->callee->needed
          && !cgraph_new_nodes)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (e->callee->analyzed)
            overall_insns -= e->callee->global.insns, nfunctions_inlined++;
          duplicate = false;
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
                                 update_original);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
         + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}

/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  If any new indirect edges are discovered in the
   process, add them to NEW_EDGES, unless it is NULL.  Return true iff any
   new callgraph edges were discovered as a result of inlining.  */

static bool
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
                         VEC (cgraph_edge_p, heap) **new_edges)
{
  int old_insns = 0, new_insns = 0;
  struct cgraph_node *to = NULL, *what;
  struct cgraph_edge *curr = e;

  gcc_assert (e->inline_failed);
  e->inline_failed = CIF_OK;

  if (!e->callee->global.inlined)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  /* Now update size of caller and all functions caller is inlined into.  */
  for (; e && !e->inline_failed; e = e->caller->callers)
    {
      old_insns = e->caller->global.insns;
      new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
                                                       what);
      gcc_assert (new_insns >= 0);
      to = e->caller;
      to->global.insns = new_insns;
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_insns > old_insns)
    overall_insns += new_insns - old_insns;
  ncalls_inlined++;

  if (flag_indirect_inlining)
    return ipa_propagate_indirect_call_infos (curr, new_edges);
  else
    return false;
}

/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return following unredirected edge in the list of callers
   of EDGE->CALLEE.  */

static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true, NULL);
          if (e == edge)
            edge = next;
        }
    }

  return edge;
}

/* Estimate the growth caused by inlining NODE into all callees.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  bool self_recursive = false;

  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->caller == node)
        self_recursive = true;
      if (e->inline_failed)
        growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                   - e->caller->global.insns);
    }

  /* ??? Wrong for non-trivially self recursive functions or cases where
     we decide to not inline for different reasons, but it is not big deal
     as in that case we will keep the body around, but we will also avoid
     some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
}

/* Return false when inlining WHAT into TO is not a good idea
   as it would cause too large growth of function bodies.
   When ONE_ONLY is true, assume that only one call site is going
   to be inlined, otherwise figure out how many call sites in
   TO call WHAT and verify that all can be inlined.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            cgraph_inline_failed_t *reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining large function body called once into small function,
     take the inlined function as base for limiting the growth.  */
  if (inline_summary (to)->self_insns > inline_summary (what)->self_insns)
    limit = inline_summary (to)->self_insns;
  else
    limit = inline_summary (what)->self_insns;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.insns
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
                   + inline_summary (to)->estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
        *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

/* Return true when function N is small enough to be inlined.  */

static bool
cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
{
  tree decl = n->decl;

  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
        *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      return false;
    }

  if (!n->analyzed)
    {
      if (reason)
        *reason = CIF_BODY_NOT_AVAILABLE;
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          return false;
        }
    }
  else
    {
      if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
          return false;
        }
    }

  return true;
}

/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining all cases where same function appears more than
   once in the single recursion nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             cgraph_inline_failed_t *reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
  return recursive;
}

/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable callers
   of the function or function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  int badness;
  int growth =
    cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

  growth -= edge->caller->global.insns;

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    badness = INT_MIN - growth;

  /* When profiling is available, base priorities on -(#calls / growth).
     So we optimize for overall number of "executed" inlined calls.  */
  else if (max_count)
    badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;

  /* When function local profile is available, base priorities on
     growth / frequency, so we optimize for overall frequency of inlined
     calls.  This is not too accurate since while the call might be frequent
     within function, the function itself is infrequent.

     Other objective to optimize for is number of different calls inlined.
     We add the estimated growth after inlining all functions to bias the
     priorities slightly in this direction (so fewer times called functions
     of the same size gets priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      growth -= edge->caller->global.insns;
      badness = growth * 256;

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 256)
        div = 256 + ceil_log2 (div) - 8;
      if (div < 1)
        div = 1;
      if (badness > 0)
        badness /= div;
      badness += cgraph_estimate_growth (edge->callee);
    }
  /* When function local profile is not available or it does not give
     useful information (ie frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        {
          badness <<= nest;
        }
    }
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}

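/* Worked example (illustrative only, not from the original sources): with a
   local profile, a call whose inlining grows the caller by 20 insns and whose
   frequency equals CGRAPH_FREQ_BASE gets div = 100 and badness
   = 20 * 256 / 100 = 51 before the overall-growth bias is added; the same
   growth at one tenth of that frequency gives div = 10 and badness = 512.
   The hotter call site thus gets the smaller badness and is extracted from
   the heap, and inlined, first.  */
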
/* Recompute heap nodes for each of caller edge.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        int badness = cgraph_edge_badness (edge);
        if (edge->aux)
          {
            fibnode_t n = (fibnode_t) edge->aux;
            gcc_assert (n->data == edge);
            if (n->key == badness)
              continue;

            /* fibheap_replace_key only increase the keys.  */
            if (fibheap_replace_key (heap, n, badness))
              continue;
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
          }
        edge->aux = fibheap_insert (heap, badness, edge);
      }
}

/* Recompute heap nodes for each of caller edges of each of callees.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else if (!e->inline_failed)
      update_callee_keys (heap, e->callee, updated_nodes);
}

/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by expected number
           of calls.  Without profile feedback we maintain simple queue
           to order candidates via recursive depths.  */
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given argument.  If any new indirect
   edges are discovered in the process, add them to *NEW_EDGES, unless
   NEW_EDGES is NULL.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node,
                                  VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that function is small enough to be considered for inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             " Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update list of recursive call during process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     "   maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, "   Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         "   Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n   Inlined %i times, body grown from %i to %i insns\n", n,
             master_clone->global.insns, node->global.insns);

  /* Remove master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces number of calls of the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}

/* Set inline_failed for all callers of given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node,
                          cgraph_inline_failed_t reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n",
             cgraph_inline_failed_string (reason));
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}

/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}

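/* Illustrative example (not from the original sources): with the unit growth
   parameter set to 30 (--param inline-unit-growth=30), a unit estimated at
   10000 insns may grow to at most 10000 * 130 / 100 = 13000 insns; units
   smaller than PARAM_LARGE_UNIT_INSNS are first rounded up to that value so
   that tiny units are not penalized.  */
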
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */
static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
    }
}

/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and the estimates
   are then updated after each inlining.

   INLINED and INLINED_CALLEES are just pointers to arrays large enough
   to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_insns, max_insns;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
          || node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
          }
    }

  max_insns = compute_max_insns (overall_insns);
  min_insns = overall_insns;

  while (overall_insns <= max_insns
         && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
    {
      int old_insns = overall_insns;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      cgraph_inline_failed_t not_good = CIF_OK;

      growth -= edge->caller->global.insns;

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i insns\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.insns);
          fprintf (dump_file,
                   " to be inlined into %s in %s:%i\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   gimple_filename ((const_gimple) edge->call_stmt),
                   gimple_lineno ((const_gimple) edge->call_stmt),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge),
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
        }
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* When not having profile info ready we don't weight by any way the
         position of the call in the procedure itself.  This means that if a
         call of function A from function B seems profitable to inline, the
         recursive call of function A in the inline copy of A in B will look
         profitable too, and we end up inlining until reaching the maximal
         function growth.  This is not a good idea, so prohibit the recursive
         inlining.

         ??? When the frequencies are taken into account we might not need this
         restriction.

         We need to be careful here, in some testcases, e.g. directivec.c in
         libcpp, we can estimate a self recursive function to have negative
         growth for inlining completely.  */
      if (!edge->count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits
                   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (!cgraph_maybe_hot_edge_p (edge))
        not_good = CIF_UNLIKELY_CALL;
      if (!flag_inline_functions
          && !DECL_DECLARED_INLINE_P (edge->callee->decl))
        not_good = CIF_NOT_DECLARED_INLINED;
      if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (edge->caller->decl)))
        not_good = CIF_OPTIMIZING_FOR_SIZE;
      if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed = not_good;
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!tree_can_inline_p (edge->caller->decl, edge->callee->decl))
        {
          gimple_call_set_cannot_inline (edge->call_stmt, true);
          edge->inline_failed = CIF_TARGET_OPTION_MISMATCH;
          if (dump_file)
            fprintf (dump_file, " inline_failed:%s.\n",
                     cgraph_inline_failed_string (edge->inline_failed));
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where,
                                                 flag_indirect_inlining
                                                 ? &new_indirect_edges : NULL))
            continue;
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);
          update_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (gimple_call_cannot_inline_p (edge->call_stmt)
              || !cgraph_check_inline_limits (edge->caller, edge->callee,
                                              &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller),
                         cgraph_inline_failed_string (edge->inline_failed));
              continue;
            }
          callee = edge->callee;
          cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);

          update_callee_keys (heap, callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
         such as the number of inlinable calls and the size of the function
         body.  After inlining these properties might change for the function
         we inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has %i insns,"
                   "net change of %+i insns.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.insns,
                   overall_insns - old_insns);
        }
      if (min_insns > overall_insns)
        {
          min_insns = overall_insns;
          max_insns = compute_max_insns (min_insns);

          if (dump_file)
            fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
        }
    }
  while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
    }

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_insns = 0;
  int i;
  int initial_insns = 0;
  bool redo_always_inline = true;

  cgraph_remove_function_insertion_hook (function_insertion_hook_holder);

  max_count = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed && (node->needed || node->reachable))
      {
        struct cgraph_edge *e;

        initial_insns += inline_summary (node)->self_insns;
        gcc_assert (inline_summary (node)->self_insns == node->global.insns);
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
      }
  overall_insns = initial_insns;
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with %i insns.\n",
             initial_insns);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a priority
     so none of our later choices will make this impossible.  */
  while (redo_always_inline)
    {
      redo_always_inline = false;
      for (i = nnodes - 1; i >= 0; i--)
        {
          struct cgraph_edge *e, *next;

          node = order[i];

          /* Handle nodes to be flattened, but don't update overall unit
             size.  */
          if (lookup_attribute ("flatten",
                                DECL_ATTRIBUTES (node->decl)) != NULL)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Flattening %s\n", cgraph_node_name (node));
              cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
            }

          if (!node->local.disregard_inline_limits)
            continue;
          if (dump_file)
            fprintf (dump_file,
                     "\nConsidering %s %i insns (always inline)\n",
                     cgraph_node_name (node), node->global.insns);
          old_insns = overall_insns;
          for (e = node->callers; e; e = next)
            {
              next = e->next_caller;
              if (!e->inline_failed
                  || gimple_call_cannot_inline_p (e->call_stmt))
                continue;
              if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                               &e->inline_failed))
                continue;
              if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
                {
                  gimple_call_set_cannot_inline (e->call_stmt, true);
                  continue;
                }
              if (cgraph_mark_inline_edge (e, true, NULL))
                redo_always_inline = true;
              if (dump_file)
                fprintf (dump_file,
                         " Inlined into %s which now has %i insns.\n",
                         cgraph_node_name (e->caller),
                         e->caller->global.insns);
            }
          /* Inlining self recursive function might introduce new calls to
             themselves we didn't see in the loop above.  Fill in the proper
             reason why inline failed.  */
          for (e = node->callers; e; e = e->next_caller)
            if (e->inline_failed)
              e->inline_failed = CIF_RECURSIVE_INLINING;
          if (dump_file)
            fprintf (dump_file,
                     " Inlined for a net change of %+i insns.\n",
                     overall_insns - old_insns);
        }
    }

  cgraph_decide_inlining_of_small_functions ();

  if (flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers
              && !node->callers->next_caller
              && !node->needed
              && node->local.inlinable
              && node->callers->inline_failed
              && !gimple_call_cannot_inline_p (node->callers->call_stmt)
              && !DECL_EXTERNAL (node->decl)
              && !DECL_COMDAT (node->decl))
            {
              if (dump_file)
                {
                  fprintf (dump_file,
                           "\nConsidering %s %i insns.\n",
                           cgraph_node_name (node), node->global.insns);
                  fprintf (dump_file,
                           " Called once from %s %i insns.\n",
                           cgraph_node_name (node->callers->caller),
                           node->callers->caller->global.insns);
                }

              old_insns = overall_insns;

              if (cgraph_check_inline_limits (node->callers->caller, node,
                                              NULL, false))
                {
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
                    fprintf (dump_file,
                             " Inlined into %s which now has %i insns"
                             " for a net change of %+i insns.\n",
                             cgraph_node_name (node->callers->caller),
                             node->callers->caller->global.insns,
                             overall_insns - old_insns);
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             " Inline limit reached, not inlined.\n");
                }
            }
        }
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    free_all_ipa_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "%i insns turned to %i insns.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_insns,
             overall_insns);
  free (order);
  return 0;
}

/* Try to inline edge E from the incremental inliner.  MODE specifies the mode
   of the inliner.

   We are detecting cycles by storing the mode of the inliner into the
   cgraph_node last time we visited it in the recursion.  In general when the
   mode is set, we have recursive inlining, but as a special case, we want to
   try harder to inline ALWAYS_INLINE functions: consider callgraph a->b->c->b,
   with a being flatten, b being always inline.  Flattening 'a' will collapse
   a->b->c before hitting the cycle.  To accommodate always inline, we however
   need to inline a->b->c->b.

   So after hitting a cycle the first time, we switch into ALWAYS_INLINE mode
   and stop inlining only after hitting ALWAYS_INLINE in ALWAYS_INLINE mode.  */
static bool
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
  struct cgraph_node *callee = e->callee;
  enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
  bool always_inline = e->callee->local.disregard_inline_limits;
  bool inlined = false;

  /* We've hit cycle?  */
  if (callee_mode)
    {
      /* It is the first time we see it and we are not in ALWAYS_INLINE only
         mode yet, and the function in question is always_inline.  */
      if (always_inline && mode != INLINE_ALWAYS_INLINE)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Hit cycle in %s, switching to always inline only.\n",
                       cgraph_node_name (callee));
            }
          mode = INLINE_ALWAYS_INLINE;
        }
      /* Otherwise it is time to give up.  */
      else
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining %s into %s to avoid cycle.\n",
                       cgraph_node_name (callee),
                       cgraph_node_name (e->caller));
            }
          e->inline_failed = (e->callee->local.disregard_inline_limits
                              ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
          return false;
        }
    }

  callee->aux = (void *)(size_t) mode;
  if (dump_file)
    {
      indent_to (dump_file, depth);
      fprintf (dump_file, " Inlining %s into %s.\n",
               cgraph_node_name (e->callee),
               cgraph_node_name (e->caller));
    }
  if (e->inline_failed)
    {
      cgraph_mark_inline (e);

      /* In order to fully inline always_inline functions, we need to
         recurse here, since the inlined functions might not be processed by
         incremental inlining at all yet.

         Also flattening needs to be done recursively.  */

      if (mode == INLINE_ALL || always_inline)
        cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
      inlined = true;
    }
  callee->aux = (void *)(size_t) callee_mode;
  return inlined;
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.
   DEPTH is depth of recursion, used only for debug output.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
                                      enum inlining_mode mode,
                                      int depth)
{
  struct cgraph_edge *e;
  bool inlined = false;
  cgraph_inline_failed_t failed_reason;
  enum inlining_mode old_mode;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  old_mode = (enum inlining_mode) (size_t)node->aux;

  if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
        }
      mode = INLINE_ALL;
    }

  node->aux = (void *)(size_t) mode;

  /* First of all look for always inline functions.  */
  if (mode != INLINE_SIZE_NORECURSIVE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.disregard_inline_limits
            && (mode != INLINE_ALL || !e->callee->local.inlinable))
          continue;
        if (gimple_call_cannot_inline_p (e->call_stmt))
          continue;
        /* When the edge is already inlined, we just need to recurse into
           it in order to fully flatten the leaves.  */
        if (!e->inline_failed && mode == INLINE_ALL)
          {
            inlined |= try_inline (e, mode, depth);
            continue;
          }
        if (dump_file)
          {
            indent_to (dump_file, depth);
            fprintf (dump_file,
                     "Considering to always inline inline candidate %s.\n",
                     cgraph_node_name (e->callee));
          }
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (!tree_can_inline_p (node->decl, e->callee->decl))
          {
            gimple_call_set_cannot_inline (e->call_stmt, true);
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Target specific option mismatch.\n");
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: SSA form does not match.\n");
              }
            continue;
          }
        if (!e->callee->analyzed)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        inlined |= try_inline (e, mode, depth);
      }

  /* Now do the automatic inlining.  */
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.inlinable
            || !e->inline_failed
            || e->callee->local.disregard_inline_limits)
          continue;
        if (dump_file)
          fprintf (dump_file, "Considering inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: SSA form does not match.\n");
              }
            continue;
          }

        /* When the function body would grow and inlining the function won't
           eliminate the need for an offline copy of the function, don't
           inline.  */
        if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
             || (!flag_inline_functions
                 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
            && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                > e->caller->global.insns)
            && cgraph_estimate_growth (e->callee) > 0)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: code size would grow by %i insns.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
                         - e->caller->global.insns);
              }
            continue;
          }
        if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                         false)
            || gimple_call_cannot_inline_p (e->call_stmt))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: %s.\n",
                         cgraph_inline_failed_string (e->inline_failed));
              }
            continue;
          }
        if (!e->callee->analyzed)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        if (!tree_can_inline_p (node->decl, e->callee->decl))
          {
            gimple_call_set_cannot_inline (e->call_stmt, true);
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Target specific option mismatch.\n");
              }
            continue;
          }
        if (cgraph_default_inline_p (e->callee, &failed_reason))
          inlined |= try_inline (e, mode, depth);
      }
  node->aux = (void *)(size_t) old_mode;
  return inlined;
}

/* Because inlining might remove no-longer reachable nodes, we need to
   keep the array visible to garbage collector to avoid reading collected
   out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   later real inlining pass for testcases with very many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;
  int iterations = 0;

  if (sorrycount || errorcount)
    return 0;
  while (cgraph_decide_inlining_incrementally (node,
                                               iterations
                                               ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0)
         && iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS))
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      iterations++;
      timevar_pop (TV_INTEGRATION);
    }
  if (dump_file)
    fprintf (dump_file, "Iterations: %i\n", iterations);
  cfun->always_inline_functions_inlined = true;
  return todo;
}

/* When inlining shall be performed.  */
static bool
cgraph_gate_early_inlining (void)
{
  return flag_early_inlining;
}

struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",                            /* name */
  cgraph_gate_early_inlining,           /* gate */
  cgraph_early_inlining,                /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};

/* When inlining shall be performed.  */
static bool
cgraph_gate_ipa_early_inlining (void)
{
  return (flag_early_inlining
          && (flag_branch_probabilities || flag_test_coverage
              || profile_arc_flag));
}

/* IPA pass wrapper for early inlining pass.  We need to run early inlining
   before tree profiling so we have stand alone IPA pass for doing so.  */
struct simple_ipa_opt_pass pass_ipa_early_inline =
{
 {
  SIMPLE_IPA_PASS,
  "einline_ipa",                        /* name */
  cgraph_gate_ipa_early_inlining,       /* gate */
  NULL,                                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_cgraph                      /* todo_flags_finish */
 }
};

/* Compute parameters of functions used by inliner.  */
unsigned int
compute_inline_parameters (struct cgraph_node *node)
{
  HOST_WIDE_INT self_stack_size;

  gcc_assert (!node->global.inlined_to);

  /* Estimate the stack size for the function.  But not at -O0
     because estimated_stack_frame_size is a quadratic problem.  */
  self_stack_size = optimize ? estimated_stack_frame_size () : 0;
  inline_summary (node)->estimated_self_stack_size = self_stack_size;
  node->global.estimated_stack_size = self_stack_size;
  node->global.stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  node->local.inlinable = tree_inlinable_function_p (current_function_decl);

  /* Estimate the number of instructions for this function.
     ??? At -O0 we don't use this information except for the dumps, and
     even then only for always_inline functions.  But disabling this
     causes ICEs in the inline heuristics...  */
  inline_summary (node)->self_insns
    = estimate_num_insns_fn (current_function_decl, &eni_inlining_weights);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);

  /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
  node->global.insns = inline_summary (node)->self_insns;
  return 0;
}

/* Compute parameters of functions used by inliner using
   current_function_decl.  */
static unsigned int
compute_inline_parameters_for_current (void)
{
  compute_inline_parameters (cgraph_node (current_function_decl));
  return 0;
}

struct gimple_opt_pass pass_inline_parameters =
{
 {
  GIMPLE_PASS,
  NULL,                                 /* name */
  NULL,                                 /* gate */
  compute_inline_parameters_for_current,/* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0                                     /* todo_flags_finish */
 }
};

/* This function performs intraprocedural analysis in NODE that is required to
   inline indirect calls.  */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  struct cgraph_edge *cs;

  if (!flag_ipa_cp)
    {
      ipa_initialize_node_params (node);
      ipa_detect_param_modifications (node);
    }
  ipa_analyze_params_uses (node);

  if (!flag_ipa_cp)
    for (cs = node->callees; cs; cs = cs->next_callee)
      {
        ipa_count_arguments (cs);
        ipa_compute_jump_functions (cs);
      }

  if (dump_file)
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}

/* Note function body size.  */
static void
analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  current_function_decl = node->decl;

  compute_inline_parameters (node);
  if (flag_indirect_inlining)
    inline_indirect_intraprocedural_analysis (node);

  current_function_decl = NULL;
  pop_cfun ();
}

/* Called when new function is inserted to callgraph late.  */
static void
add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  analyze_function (node);
}

/* Note function body size.  */
static void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);

  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      ipa_check_create_node_params ();
      ipa_check_create_edge_args ();
    }

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      analyze_function (node);

  return;
}

/* Apply inline plan to function.  */
static unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (node->decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    if (!e->inline_failed || warn_inline)
      break;

  if (e)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  return todo | execute_fixup_cfg ();
}

struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",                             /* name */
  NULL,                                 /* gate */
  cgraph_decide_inlining,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_remove_functions,                /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions               /* todo_flags_finish */
 },
 inline_generate_summary,               /* generate_summary */
 NULL,                                  /* write_summary */
 NULL,                                  /* read_summary */
 NULL,                                  /* function_read_summary */
 0,                                     /* TODOs */
 inline_transform,                      /* function_transform */
 NULL,                                  /* variable_transform */
};

#include "gt-ipa-inline.h"