gcc/ipa-inline.c
/* Inlining decision heuristics.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation for the particular representation of inline plans in the
   callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks a given call inline and performs the necessary
     modifications of the cgraph (production of the clones and updating of
     the overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass aiming to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of the particular heuristics is separated from the
     rest of the code to make it easier to replace it with a more
     complicated implementation in the future.  The rest of the inlining
     code acts as a library aimed to modify the callgraph and verify that
     the parameters on code size growth fit.

     To mark a given call inline, use the cgraph_mark_inline function; the
     verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm, ordering
     all functions by their "profitability" (estimated by code size growth)
     and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used in
     non-unit-at-a-time mode.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes the local properties of functions that are used by
     the inliner: the estimated function body size, whether the function is
     inlinable at all, and the stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied to the function body.

   pass_early_inlining

     Simple local inlining pass inlining callees into the current function.
     This pass makes no whole-compilation-unit analysis, and thus, when
     allowed to do inlining that expands code size, it might result in
     unbounded growth of the whole unit.

     This is the main inlining pass in non-unit-at-a-time mode.

     With unit-at-a-time the pass is run during conversion into SSA form.
     Only functions already converted into SSA form are inlined, so the
     conversion must happen in topological order on the callgraph (that is
     maintained by the pass manager).  The functions are early optimized
     after inlining, so the early inliner sees the unoptimized function
     itself, but all considered callees are already optimized, allowing it
     to unfold the abstraction penalty on C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, the early inlining is also necessary to reduce
     instrumentation costs on programs with a high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought in
     via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures the
     first inlining.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm to do
     inlining of small functions that results in overall growth of the
     compilation unit, and inlining of functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph) and, unlike early inlining, it
     does not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline on
     a given function.  Possibly the function body before inlining is saved
     when it is needed for further inlining later.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
/* Modes the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In SPEED mode, all small functions are inlined.  This might result in
   unbounded growth of the compilation unit and is used only in
   non-unit-at-a-time mode.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE,
  INLINE_SPEED,
  INLINE_ALL
};

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
                                      int);
/* Statistics we collect about inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_insns;
static gcov_type max_count;
/* Estimate size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size;
  tree fndecl = what->decl, arg;
  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);

  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
    call_insns += estimate_move_cost (TREE_TYPE (arg));
  size = (what->global.insns - call_insns) * times + to->global.insns;
  gcc_assert (size >= 0);
  return size;
}
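
/* Illustrative sketch (hypothetical names, not used by this pass): the size
   model above counts the callee body TIMES times, minus the call overhead
   each inlined call removes, plus the current caller size.  For example, a
   20-insn callee with a call cost of 12 inlined twice into a 100-insn
   caller yields (20 - 12) * 2 + 100 = 116.  */

static int
example_size_after_inlining (int times, int caller_insns,
                             int callee_insns, int call_insns)
{
  /* Each inlined copy adds the callee body but removes one call.  */
  return (callee_insns - call_insns) * times + caller_insns;
}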
/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using the node originally representing the out-of-line
   function call.  */
198 void
199 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate, bool update_original)
201 HOST_WIDE_INT peak;
202 if (duplicate)
204 /* We may eliminate the need for out-of-line copy to be output.
205 In that case just go ahead and re-use it. */
206 if (!e->callee->callers->next_caller
207 && !e->callee->needed
208 && !cgraph_new_nodes
209 && flag_unit_at_a_time)
211 gcc_assert (!e->callee->global.inlined_to);
212 if (DECL_SAVED_TREE (e->callee->decl))
213 overall_insns -= e->callee->global.insns, nfunctions_inlined++;
214 duplicate = false;
216 else
218 struct cgraph_node *n;
219 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
220 update_original);
221 cgraph_redirect_edge_callee (e, n);
225 if (e->caller->global.inlined_to)
226 e->callee->global.inlined_to = e->caller->global.inlined_to;
227 else
228 e->callee->global.inlined_to = e->caller;
229 e->callee->global.stack_frame_offset
230 = e->caller->global.stack_frame_offset + e->caller->local.estimated_self_stack_size;
231 peak = e->callee->global.stack_frame_offset + e->callee->local.estimated_self_stack_size;
232 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
233 e->callee->global.inlined_to->global.estimated_stack_size = peak;
235 /* Recursively clone all bodies. */
236 for (e = e->callee->callees; e; e = e->next_callee)
237 if (!e->inline_failed)
238 cgraph_clone_inlined_nodes (e, duplicate, update_original);
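
/* Illustrative sketch of the stack accounting above (hypothetical names and
   parameters, not used by this pass): the callee's frame starts after the
   caller's own frame, and the peak over the whole inline chain is recorded
   on the node everything is inlined into.  */

static long long
example_peak_stack_after_inlining (long long caller_offset,
                                   long long caller_self_size,
                                   long long callee_self_size,
                                   long long current_peak)
{
  /* The inlined callee's locals live just past the caller's own locals.  */
  long long callee_offset = caller_offset + caller_self_size;
  long long peak = callee_offset + callee_self_size;

  return peak > current_peak ? peak : current_peak;
}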
/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  */
245 void
246 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
248 int old_insns = 0, new_insns = 0;
249 struct cgraph_node *to = NULL, *what;
251 if (e->callee->inline_decl)
252 cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));
254 gcc_assert (e->inline_failed);
255 e->inline_failed = NULL;
257 if (!e->callee->global.inlined && flag_unit_at_a_time)
258 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
259 e->callee->global.inlined = true;
261 cgraph_clone_inlined_nodes (e, true, update_original);
263 what = e->callee;
265 /* Now update size of caller and all functions caller is inlined into. */
266 for (;e && !e->inline_failed; e = e->caller->callers)
268 old_insns = e->caller->global.insns;
269 new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
270 what);
271 gcc_assert (new_insns >= 0);
272 to = e->caller;
273 to->global.insns = new_insns;
275 gcc_assert (what->global.inlined_to == to);
276 if (new_insns > old_insns)
277 overall_insns += new_insns - old_insns;
278 ncalls_inlined++;
/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */
285 static struct cgraph_edge *
286 cgraph_mark_inline (struct cgraph_edge *edge)
288 struct cgraph_node *to = edge->caller;
289 struct cgraph_node *what = edge->callee;
290 struct cgraph_edge *e, *next;
291 int times = 0;
293 /* Look for all calls, mark them inline and clone recursively
294 all inlined functions. */
295 for (e = what->callers; e; e = next)
297 next = e->next_caller;
298 if (e->caller == to && e->inline_failed)
300 cgraph_mark_inline_edge (e, true);
301 if (e == edge)
302 edge = next;
303 times++;
306 gcc_assert (times);
307 return edge;
/* Estimate the growth caused by inlining NODE into all callers.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                 - e->caller->global.insns);

  /* ??? Wrong for self-recursive functions or cases where we decide to not
     inline for different reasons, but it is not a big deal as in that case
     we will keep the body around, but we will also avoid some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl))
    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
}
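
/* Standalone sketch of the growth estimate (hypothetical names, not used by
   this pass): sum the per-call-site growth over all not-yet-inlined callers
   and subtract the callee body size when the offline copy can be dropped.  */

static int
example_estimate_growth (int ncallers_not_inlined, int callee_insns,
                         int call_insns, int body_can_be_removed)
{
  /* Each remaining call site grows its caller by the callee body minus
     the call it replaces.  */
  int growth = ncallers_not_inlined * (callee_insns - call_insns);

  /* If no offline copy is needed once every caller inlines the function,
     its body is no longer emitted.  */
  if (body_can_be_removed)
    growth -= callee_insns;

  return growth;
}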
/* Return false when inlining WHAT into TO is not a good idea
   as it would cause too large growth of function bodies.
   When ONE_ONLY is true, assume that only one call site is going
   to be inlined, otherwise figure out how many call sites in
   TO call WHAT and verify that all can be inlined.  */
342 static bool
343 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
344 const char **reason, bool one_only)
346 int times = 0;
347 struct cgraph_edge *e;
348 int newsize;
349 int limit;
350 HOST_WIDE_INT stack_size_limit, inlined_stack;
352 if (one_only)
353 times = 1;
354 else
355 for (e = to->callees; e; e = e->next_callee)
356 if (e->callee == what)
357 times++;
359 if (to->global.inlined_to)
360 to = to->global.inlined_to;
362 /* When inlining large function body called once into small function,
363 take the inlined function as base for limiting the growth. */
364 if (to->local.self_insns > what->local.self_insns)
365 limit = to->local.self_insns;
366 else
367 limit = what->local.self_insns;
369 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
371 /* Check the size after inlining against the function limits. But allow
372 the function to shrink if it went over the limits by forced inlining. */
373 newsize = cgraph_estimate_size_after_inlining (times, to, what);
374 if (newsize >= to->global.insns
375 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
376 && newsize > limit)
378 if (reason)
379 *reason = N_("--param large-function-growth limit reached");
380 return false;
383 stack_size_limit = to->local.estimated_self_stack_size;
385 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
387 inlined_stack = (to->global.stack_frame_offset
388 + to->local.estimated_self_stack_size
389 + what->global.estimated_stack_size);
390 if (inlined_stack > stack_size_limit
391 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
393 if (reason)
394 *reason = N_("--param large-stack-frame-growth limit reached");
395 return false;
397 return true;
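
/* A sketch of the function-growth limit checked above, with hypothetical
   stand-ins for the --param knobs (large-function-growth as a percentage,
   large-function-insns as an absolute threshold); the stack frame check is
   analogous.  Not used by this pass.  */

#define EXAMPLE_GROWTH_PERCENT 100
#define EXAMPLE_LARGE_FUNCTION_INSNS 2700

static int
example_within_function_growth_limit (int caller_insns, int callee_insns,
                                      int new_size)
{
  /* The larger of the two bodies is the base for the relative limit.  */
  int base = caller_insns > callee_insns ? caller_insns : callee_insns;
  int limit = base + base * EXAMPLE_GROWTH_PERCENT / 100;

  /* Growth is rejected only when all three conditions hold, so forced
     inlining that already blew the limit can still shrink the body.  */
  if (new_size >= caller_insns
      && new_size > EXAMPLE_LARGE_FUNCTION_INSNS
      && new_size > limit)
    return 0;
  return 1;
}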
400 /* Return true when function N is small enough to be inlined. */
402 bool
403 cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
405 tree decl = n->decl;
407 if (n->inline_decl)
408 decl = n->inline_decl;
409 if (!DECL_INLINE (decl))
411 if (reason)
412 *reason = N_("function not inlinable");
413 return false;
416 if (!DECL_STRUCT_FUNCTION (decl)->cfg)
418 if (reason)
419 *reason = N_("function body not available");
420 return false;
423 if (DECL_DECLARED_INLINE_P (decl))
425 if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
427 if (reason)
428 *reason = N_("--param max-inline-insns-single limit reached");
429 return false;
432 else
434 if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
436 if (reason)
437 *reason = N_("--param max-inline-insns-auto limit reached");
438 return false;
442 return true;
445 /* Return true when inlining WHAT would create recursive inlining.
446 We call recursive inlining all cases where same function appears more than
447 once in the single recursion nest path in the inline graph. */
449 static bool
450 cgraph_recursive_inlining_p (struct cgraph_node *to,
451 struct cgraph_node *what,
452 const char **reason)
454 bool recursive;
455 if (to->global.inlined_to)
456 recursive = what->decl == to->global.inlined_to->decl;
457 else
458 recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we should
     not warn about it.  */
461 if (recursive && reason)
462 *reason = (what->local.disregard_inline_limits
463 ? N_("recursive inlining") : "");
464 return recursive;
467 /* Return true if the call can be hot. */
468 static bool
469 cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
471 if (profile_info && flag_branch_probabilities
472 && (edge->count
473 <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
474 return false;
475 return true;
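
/* A sketch of the hotness test above (hypothetical names; the real fraction
   comes from --param hot-bb-count-fraction): with profile feedback an edge
   is considered cold when its count is no more than the maximal observed
   count divided by the fraction.  */

static int
example_maybe_hot_edge (long long edge_count, long long sum_max,
                        int hot_fraction, int have_profile)
{
  if (have_profile && edge_count <= sum_max / hot_fraction)
    return 0;   /* cold */
  return 1;     /* possibly hot */
}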
478 /* A cost model driving the inlining heuristics in a way so the edges with
479 smallest badness are inlined first. After each inlining is performed
480 the costs of all caller edges of nodes affected are recomputed so the
481 metrics may accurately depend on values such as number of inlinable callers
482 of the function or function body size. */
484 static int
485 cgraph_edge_badness (struct cgraph_edge *edge)
487 int badness;
488 int growth =
489 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
491 growth -= edge->caller->global.insns;
493 /* Always prefer inlining saving code size. */
494 if (growth <= 0)
495 badness = INT_MIN - growth;
  /* When profiling is available, base priorities on -(#calls / growth),
     so we optimize for the overall number of "executed" inlined calls.  */
499 else if (max_count)
500 badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;
  /* When a function-local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of inlined
     calls.  This is not too accurate since while the call might be frequent
     within the function, the function itself is infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so functions of the
     same size that are called fewer times get priority).  */
511 else if (flag_guess_branch_prob)
513 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
514 int growth =
515 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
516 growth -= edge->caller->global.insns;
517 badness = growth * 256;
519 /* Decrease badness if call is nested. */
520 /* Compress the range so we don't overflow. */
521 if (div > 256)
522 div = 256 + ceil_log2 (div) - 8;
523 if (div < 1)
524 div = 1;
525 if (badness > 0)
526 badness /= div;
527 badness += cgraph_estimate_growth (edge->callee);
529 /* When function local profile is not available or it does not give
530 useful information (ie frequency is zero), base the cost on
531 loop nest and overall size growth, so we optimize for overall number
532 of functions fully inlined in program. */
533 else
535 int nest = MIN (edge->loop_nest, 8);
536 badness = cgraph_estimate_growth (edge->callee) * 256;
538 /* Decrease badness if call is nested. */
539 if (badness > 0)
540 badness >>= nest;
541 else
543 badness <<= nest;
546 /* Make recursive inlining happen always after other inlining is done. */
547 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
548 return badness + 1;
549 else
550 return badness;
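
/* The three priority regimes above, collapsed into a standalone sketch
   (hypothetical inputs; the real code also compresses the frequency divisor
   and only divides positive badness): smaller badness is inlined first.
   Not used by this pass.  */

#include <limits.h>

static int
example_edge_badness (int growth, long long edge_count, long long max_count,
                      int frequency_percent, int overall_growth)
{
  int badness;

  if (growth <= 0)
    /* Shrinking the code is always the best candidate.  */
    badness = INT_MIN - growth;
  else if (max_count > 0)
    /* Profile feedback: roughly -(#executed calls) / growth.  */
    badness = (int) ((double) edge_count * INT_MIN / max_count) / growth;
  else
    {
      /* Static profile: growth scaled down by call frequency, biased by
         the estimated overall growth of inlining the callee everywhere.  */
      int div = frequency_percent > 0 ? frequency_percent : 1;
      badness = growth * 256 / div + overall_growth;
    }
  return badness;
}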
/* Recompute heap nodes for each caller edge.  */
555 static void
556 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
557 bitmap updated_nodes)
559 struct cgraph_edge *edge;
560 const char *failed_reason;
562 if (!node->local.inlinable || node->local.disregard_inline_limits
563 || node->global.inlined_to)
564 return;
565 if (bitmap_bit_p (updated_nodes, node->uid))
566 return;
567 bitmap_set_bit (updated_nodes, node->uid);
568 node->global.estimated_growth = INT_MIN;
570 if (!node->local.inlinable)
571 return;
572 /* Prune out edges we won't inline into anymore. */
573 if (!cgraph_default_inline_p (node, &failed_reason))
575 for (edge = node->callers; edge; edge = edge->next_caller)
576 if (edge->aux)
578 fibheap_delete_node (heap, edge->aux);
579 edge->aux = NULL;
580 if (edge->inline_failed)
581 edge->inline_failed = failed_reason;
583 return;
586 for (edge = node->callers; edge; edge = edge->next_caller)
587 if (edge->inline_failed)
589 int badness = cgraph_edge_badness (edge);
590 if (edge->aux)
592 fibnode_t n = edge->aux;
593 gcc_assert (n->data == edge);
594 if (n->key == badness)
595 continue;
597 /* fibheap_replace_key only increase the keys. */
598 if (fibheap_replace_key (heap, n, badness))
599 continue;
600 fibheap_delete_node (heap, edge->aux);
602 edge->aux = fibheap_insert (heap, badness, edge);
/* Recompute heap nodes for the caller edges of each of the callees.  */
608 static void
609 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
610 bitmap updated_nodes)
612 struct cgraph_edge *e;
613 node->global.estimated_growth = INT_MIN;
615 for (e = node->callees; e; e = e->next_callee)
616 if (e->inline_failed)
617 update_caller_keys (heap, e->callee, updated_nodes);
618 else if (!e->inline_failed)
619 update_callee_keys (heap, e->callee, updated_nodes);
622 /* Enqueue all recursive calls from NODE into priority queue depending on
623 how likely we want to recursively inline the call. */
625 static void
626 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
627 fibheap_t heap)
629 static int priority;
630 struct cgraph_edge *e;
631 for (e = where->callees; e; e = e->next_callee)
632 if (e->callee == node)
634 /* When profile feedback is available, prioritize by expected number
635 of calls. Without profile feedback we maintain simple queue
636 to order candidates via recursive depths. */
637 fibheap_insert (heap,
638 !max_count ? priority++
639 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
642 for (e = where->callees; e; e = e->next_callee)
643 if (!e->inline_failed)
644 lookup_recursive_calls (node, e->callee, heap);
/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given limit.  */
650 static bool
651 cgraph_decide_recursive_inlining (struct cgraph_node *node)
653 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
654 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
655 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
656 fibheap_t heap;
657 struct cgraph_edge *e;
658 struct cgraph_node *master_clone, *next;
659 int depth = 0;
660 int n = 0;
662 if (DECL_DECLARED_INLINE_P (node->decl))
664 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
665 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
668 /* Make sure that function is small enough to be considered for inlining. */
669 if (!max_depth
670 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
671 return false;
672 heap = fibheap_new ();
673 lookup_recursive_calls (node, node, heap);
674 if (fibheap_empty (heap))
676 fibheap_delete (heap);
677 return false;
680 if (dump_file)
681 fprintf (dump_file,
682 " Performing recursive inlining on %s\n",
683 cgraph_node_name (node));
685 /* We need original clone to copy around. */
686 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
687 master_clone->needed = true;
688 for (e = master_clone->callees; e; e = e->next_callee)
689 if (!e->inline_failed)
690 cgraph_clone_inlined_nodes (e, true, false);
692 /* Do the inlining and update list of recursive call during process. */
693 while (!fibheap_empty (heap)
694 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
695 <= limit))
697 struct cgraph_edge *curr = fibheap_extract_min (heap);
698 struct cgraph_node *cnode;
700 depth = 1;
701 for (cnode = curr->caller;
702 cnode->global.inlined_to; cnode = cnode->callers->caller)
703 if (node->decl == curr->callee->decl)
704 depth++;
705 if (depth > max_depth)
707 if (dump_file)
708 fprintf (dump_file,
709 " maxmal depth reached\n");
710 continue;
713 if (max_count)
715 if (!cgraph_maybe_hot_edge_p (curr))
717 if (dump_file)
718 fprintf (dump_file, " Not inlining cold call\n");
719 continue;
721 if (curr->count * 100 / node->count < probability)
723 if (dump_file)
724 fprintf (dump_file,
725 " Probability of edge is too small\n");
726 continue;
730 if (dump_file)
732 fprintf (dump_file,
733 " Inlining call of depth %i", depth);
734 if (node->count)
736 fprintf (dump_file, " called approx. %.2f times per call",
737 (double)curr->count / node->count);
739 fprintf (dump_file, "\n");
741 cgraph_redirect_edge_callee (curr, master_clone);
742 cgraph_mark_inline_edge (curr, false);
743 lookup_recursive_calls (node, curr->callee, heap);
744 n++;
746 if (!fibheap_empty (heap) && dump_file)
747 fprintf (dump_file, " Recursive inlining growth limit met.\n");
749 fibheap_delete (heap);
750 if (dump_file)
751 fprintf (dump_file,
752 "\n Inlined %i times, body grown from %i to %i insns\n", n,
753 master_clone->global.insns, node->global.insns);
  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
758 for (node = cgraph_nodes; node != master_clone;
759 node = next)
761 next = node->next;
762 if (node->global.inlined_to == master_clone)
763 cgraph_remove_node (node);
765 cgraph_remove_node (master_clone);
766 /* FIXME: Recursive inlining actually reduces number of calls of the
767 function. At this place we should probably walk the function and
768 inline clones and compensate the counts accordingly. This probably
769 doesn't matter much in practice. */
770 return n > 0;
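
/* A compact sketch of the recursion budget used above (hypothetical limits;
   the real pass also orders candidates by profile counts and clones a master
   body): keep expanding recursive calls while the estimated size stays under
   the limit and the depth under the cap.  Not used by this pass.  */

static int
example_recursive_inline_budget (int body_insns, int call_insns,
                                 int size_limit, int max_depth)
{
  int size = body_insns;
  int depth = 0;

  /* Each expansion splices one more copy of the body in place of a
     recursive call.  */
  while (depth < max_depth
         && size + (body_insns - call_insns) <= size_limit)
    {
      size += body_insns - call_insns;
      depth++;
    }
  return depth;   /* Number of recursive copies we could afford.  */
}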
773 /* Set inline_failed for all callers of given function to REASON. */
775 static void
776 cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
778 struct cgraph_edge *e;
780 if (dump_file)
781 fprintf (dump_file, "Inlining failed: %s\n", reason);
782 for (e = node->callers; e; e = e->next_caller)
783 if (e->inline_failed)
784 e->inline_failed = reason;
787 /* Given whole compilation unit estimate of INSNS, compute how large we can
788 allow the unit to grow. */
789 static int
790 compute_max_insns (int insns)
792 int max_insns = insns;
793 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
794 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
796 return ((HOST_WIDEST_INT) max_insns
797 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
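
/* The unit growth cap above in isolation (hypothetical defaults): the unit
   may grow by inline-unit-growth percent of its size, but never below the
   large-unit-insns floor, so tiny units are not starved.  Not used by this
   pass.  */

static long long
example_compute_max_insns (int insns, int large_unit_insns,
                           int unit_growth_percent)
{
  int base = insns < large_unit_insns ? large_unit_insns : insns;

  /* E.g. 10000 insns with 30% growth allowed => 13000.  */
  return (long long) base * (100 + unit_growth_percent) / 100;
}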
/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and then the
   estimates are updated.

   INLINED and INLINED_CALLEES are just pointers to arrays large enough
   to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */
807 static void
808 cgraph_decide_inlining_of_small_functions (void)
810 struct cgraph_node *node;
811 struct cgraph_edge *edge;
812 const char *failed_reason;
813 fibheap_t heap = fibheap_new ();
814 bitmap updated_nodes = BITMAP_ALLOC (NULL);
815 int min_insns, max_insns;
817 if (dump_file)
818 fprintf (dump_file, "\nDeciding on smaller functions:\n");
820 /* Put all inline candidates into the heap. */
822 for (node = cgraph_nodes; node; node = node->next)
824 if (!node->local.inlinable || !node->callers
825 || node->local.disregard_inline_limits)
826 continue;
827 if (dump_file)
828 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
830 node->global.estimated_growth = INT_MIN;
831 if (!cgraph_default_inline_p (node, &failed_reason))
833 cgraph_set_inline_failed (node, failed_reason);
834 continue;
837 for (edge = node->callers; edge; edge = edge->next_caller)
838 if (edge->inline_failed)
840 gcc_assert (!edge->aux);
841 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
845 max_insns = compute_max_insns (overall_insns);
846 min_insns = overall_insns;
848 while (overall_insns <= max_insns && (edge = fibheap_extract_min (heap)))
850 int old_insns = overall_insns;
851 struct cgraph_node *where;
852 int growth =
853 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
855 growth -= edge->caller->global.insns;
857 if (dump_file)
859 fprintf (dump_file,
860 "\nConsidering %s with %i insns\n",
861 cgraph_node_name (edge->callee),
862 edge->callee->global.insns);
863 fprintf (dump_file,
864 " to be inlined into %s\n"
865 " Estimated growth after inlined into all callees is %+i insns.\n"
866 " Estimated badness is %i, frequency %.2f.\n",
867 cgraph_node_name (edge->caller),
868 cgraph_estimate_growth (edge->callee),
869 cgraph_edge_badness (edge),
870 edge->frequency / (double)CGRAPH_FREQ_BASE);
871 if (edge->count)
872 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
874 gcc_assert (edge->aux);
875 edge->aux = NULL;
876 if (!edge->inline_failed)
877 continue;
      /* When we do not have profile info ready, we do not weight the
         position of the call within the procedure in any way.  This means
         that if a call of function A from function B seems profitable to
         inline, the recursive call of function A in the inline copy of A in
         B will look profitable too, and we end up inlining until reaching
         the maximal function growth.  This is not a good idea, so prohibit
         the recursive inlining.

         ??? When the frequencies are taken into account we might not need
         this restriction.  */
888 if (!max_count)
890 where = edge->caller;
891 while (where->global.inlined_to)
893 if (where->decl == edge->callee->decl)
894 break;
895 where = where->callers->caller;
897 if (where->global.inlined_to)
899 edge->inline_failed
900 = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
901 if (dump_file)
902 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
903 continue;
907 if (!cgraph_maybe_hot_edge_p (edge) && growth > 0)
909 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
910 &edge->inline_failed))
912 edge->inline_failed =
913 N_("call is unlikely");
914 if (dump_file)
915 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
917 continue;
919 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
921 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
922 &edge->inline_failed))
924 if (dump_file)
925 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
927 continue;
929 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
930 &edge->inline_failed))
932 where = edge->caller;
933 if (where->global.inlined_to)
934 where = where->global.inlined_to;
935 if (!cgraph_decide_recursive_inlining (where))
936 continue;
937 update_callee_keys (heap, where, updated_nodes);
939 else
941 struct cgraph_node *callee;
942 if (!cgraph_check_inline_limits (edge->caller, edge->callee,
943 &edge->inline_failed, true))
945 if (dump_file)
946 fprintf (dump_file, " Not inlining into %s:%s.\n",
947 cgraph_node_name (edge->caller), edge->inline_failed);
948 continue;
950 callee = edge->callee;
951 cgraph_mark_inline_edge (edge, true);
952 update_callee_keys (heap, callee, updated_nodes);
954 where = edge->caller;
955 if (where->global.inlined_to)
956 where = where->global.inlined_to;
      /* Our profitability metric can depend on local properties
         such as the number of inlinable calls and the size of the function
         body.  After inlining these properties might change for the function
         we inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
964 update_caller_keys (heap, where, updated_nodes);
965 bitmap_clear (updated_nodes);
967 if (dump_file)
969 fprintf (dump_file,
970 " Inlined into %s which now has %i insns,"
971 "net change of %+i insns.\n",
972 cgraph_node_name (edge->caller),
973 edge->caller->global.insns,
974 overall_insns - old_insns);
976 if (min_insns > overall_insns)
978 min_insns = overall_insns;
979 max_insns = compute_max_insns (min_insns);
981 if (dump_file)
982 fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
985 while ((edge = fibheap_extract_min (heap)) != NULL)
987 gcc_assert (edge->aux);
988 edge->aux = NULL;
989 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
990 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
991 &edge->inline_failed))
992 edge->inline_failed = N_("--param inline-unit-growth limit reached");
994 fibheap_delete (heap);
995 BITMAP_FREE (updated_nodes);
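
/* A toy version of the greedy driver above (hypothetical data; the real pass
   uses a Fibonacci heap and re-computes badness keys after every inlining):
   repeatedly take the candidate edge with the smallest badness and stop once
   the unit grows past the cap.  Not used by this pass.  */

struct example_edge
{
  int badness;
  int growth;
  int live;       /* Still a candidate.  */
};

static int
example_greedy_inline (struct example_edge *edges, int n,
                       int overall_insns, int max_insns)
{
  int inlined = 0;

  while (overall_insns <= max_insns)
    {
      int best = -1, i;

      /* Pick the remaining candidate with the smallest badness.  */
      for (i = 0; i < n; i++)
        if (edges[i].live
            && (best < 0 || edges[i].badness < edges[best].badness))
          best = i;
      if (best < 0)
        break;

      edges[best].live = 0;
      overall_insns += edges[best].growth;
      inlined++;
    }
  return inlined;
}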
998 /* Decide on the inlining. We do so in the topological order to avoid
999 expenses on updating data structures. */
1001 static unsigned int
1002 cgraph_decide_inlining (void)
1004 struct cgraph_node *node;
1005 int nnodes;
1006 struct cgraph_node **order =
1007 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1008 int old_insns = 0;
1009 int i;
1010 int initial_insns = 0;
1012 max_count = 0;
1013 for (node = cgraph_nodes; node; node = node->next)
1014 if (node->analyzed && (node->needed || node->reachable))
1016 struct cgraph_edge *e;
1018 initial_insns += node->local.self_insns;
1019 gcc_assert (node->local.self_insns == node->global.insns);
1020 for (e = node->callees; e; e = e->next_callee)
1021 if (max_count < e->count)
1022 max_count = e->count;
1024 overall_insns = initial_insns;
1025 gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
1027 nnodes = cgraph_postorder (order);
1029 if (dump_file)
1030 fprintf (dump_file,
1031 "\nDeciding on inlining. Starting with %i insns.\n",
1032 initial_insns);
1034 for (node = cgraph_nodes; node; node = node->next)
1035 node->aux = 0;
1037 if (dump_file)
1038 fprintf (dump_file, "\nInlining always_inline functions:\n");
1040 /* In the first pass mark all always_inline edges. Do this with a priority
1041 so none of our later choices will make this impossible. */
1042 for (i = nnodes - 1; i >= 0; i--)
1044 struct cgraph_edge *e, *next;
1046 node = order[i];
1048 /* Handle nodes to be flattened, but don't update overall unit size. */
1049 if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1051 if (dump_file)
1052 fprintf (dump_file,
1053 "Flattening %s\n", cgraph_node_name (node));
1054 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1057 if (!node->local.disregard_inline_limits)
1058 continue;
1059 if (dump_file)
1060 fprintf (dump_file,
1061 "\nConsidering %s %i insns (always inline)\n",
1062 cgraph_node_name (node), node->global.insns);
1063 old_insns = overall_insns;
1064 for (e = node->callers; e; e = next)
1066 next = e->next_caller;
1067 if (!e->inline_failed)
1068 continue;
1069 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1070 &e->inline_failed))
1071 continue;
1072 cgraph_mark_inline_edge (e, true);
1073 if (dump_file)
1074 fprintf (dump_file,
1075 " Inlined into %s which now has %i insns.\n",
1076 cgraph_node_name (e->caller),
1077 e->caller->global.insns);
      /* Inlining self-recursive functions might introduce new calls to
         themselves that we didn't see in the loop above.  Fill in the
         proper reason why the inline failed.  */
1082 for (e = node->callers; e; e = e->next_caller)
1083 if (e->inline_failed)
1084 e->inline_failed = N_("recursive inlining");
1085 if (dump_file)
1086 fprintf (dump_file,
1087 " Inlined for a net change of %+i insns.\n",
1088 overall_insns - old_insns);
1091 if (!flag_really_no_inline)
1092 cgraph_decide_inlining_of_small_functions ();
1094 if (!flag_really_no_inline
1095 && flag_inline_functions_called_once)
1097 if (dump_file)
1098 fprintf (dump_file, "\nDeciding on functions called once:\n");
1100 /* And finally decide what functions are called once. */
1102 for (i = nnodes - 1; i >= 0; i--)
1104 node = order[i];
1106 if (node->callers && !node->callers->next_caller && !node->needed
1107 && node->local.inlinable && node->callers->inline_failed
1108 && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
1110 if (dump_file)
1112 fprintf (dump_file,
1113 "\nConsidering %s %i insns.\n",
1114 cgraph_node_name (node), node->global.insns);
1115 fprintf (dump_file,
1116 " Called once from %s %i insns.\n",
1117 cgraph_node_name (node->callers->caller),
1118 node->callers->caller->global.insns);
1121 old_insns = overall_insns;
1123 if (cgraph_check_inline_limits (node->callers->caller, node,
1124 NULL, false))
1126 cgraph_mark_inline (node->callers);
1127 if (dump_file)
1128 fprintf (dump_file,
1129 " Inlined into %s which now has %i insns"
1130 " for a net change of %+i insns.\n",
1131 cgraph_node_name (node->callers->caller),
1132 node->callers->caller->global.insns,
1133 overall_insns - old_insns);
1135 else
1137 if (dump_file)
1138 fprintf (dump_file,
1139 " Inline limit reached, not inlined.\n");
1145 if (dump_file)
1146 fprintf (dump_file,
1147 "\nInlined %i calls, eliminated %i functions, "
1148 "%i insns turned to %i insns.\n\n",
1149 ncalls_inlined, nfunctions_inlined, initial_insns,
1150 overall_insns);
1151 free (order);
1152 return 0;
/* Try to inline edge E from the incremental inliner.  MODE specifies the
   mode of the inliner.

   We detect cycles by storing the mode of the inliner into the cgraph_node
   the last time we visited it in the recursion.  In general, when the mode
   is set, we have recursive inlining, but as a special case we want to try
   harder to inline ALWAYS_INLINE functions: consider the callgraph
   a->b->c->b, with 'a' having the flatten attribute and 'b' being always
   inline.  Flattening 'a' will collapse a->b->c before hitting the cycle.
   To accommodate always inline, we however need to inline a->b->c->b.

   So after hitting the cycle the first time, we switch into ALWAYS_INLINE
   mode and stop inlining only after hitting ALWAYS_INLINE in
   ALWAYS_INLINE mode.  */
1168 static bool
1169 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1171 struct cgraph_node *callee = e->callee;
1172 enum inlining_mode callee_mode = (size_t) callee->aux;
1173 bool always_inline = e->callee->local.disregard_inline_limits;
  /* Have we hit a cycle?  */
1176 if (callee_mode)
      /* It is the first time we see it, we are not in ALWAYS_INLINE-only
         mode yet, and the function in question is always_inline.  */
1180 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1182 if (dump_file)
1184 indent_to (dump_file, depth);
1185 fprintf (dump_file,
1186 "Hit cycle in %s, switching to always inline only.\n",
1187 cgraph_node_name (callee));
1189 mode = INLINE_ALWAYS_INLINE;
1191 /* Otherwise it is time to give up. */
1192 else
1194 if (dump_file)
1196 indent_to (dump_file, depth);
1197 fprintf (dump_file,
1198 "Not inlining %s into %s to avoid cycle.\n",
1199 cgraph_node_name (callee),
1200 cgraph_node_name (e->caller));
1202 e->inline_failed = (e->callee->local.disregard_inline_limits
1203 ? N_("recursive inlining") : "");
1204 return false;
1208 callee->aux = (void *)(size_t) mode;
1209 if (dump_file)
1211 indent_to (dump_file, depth);
1212 fprintf (dump_file, " Inlining %s into %s.\n",
1213 cgraph_node_name (e->callee),
1214 cgraph_node_name (e->caller));
1216 if (e->inline_failed)
1217 cgraph_mark_inline (e);
1219 /* In order to fully inline always_inline functions at -O0, we need to
1220 recurse here, since the inlined functions might not be processed by
1221 incremental inlining at all yet.
1223 Also flattening needs to be done recursively. */
1225 if (!flag_unit_at_a_time || mode == INLINE_ALL || always_inline)
1226 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1227 callee->aux = (void *)(size_t) callee_mode;
1228 return true;
1231 /* Decide on the inlining. We do so in the topological order to avoid
1232 expenses on updating data structures.
1233 DEPTH is depth of recursion, used only for debug output. */
1235 static bool
1236 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1237 enum inlining_mode mode,
1238 int depth)
1240 struct cgraph_edge *e;
1241 bool inlined = false;
1242 const char *failed_reason;
1243 enum inlining_mode old_mode;
1245 #ifdef ENABLE_CHECKING
1246 verify_cgraph_node (node);
1247 #endif
1249 old_mode = (size_t)node->aux;
1251 if (mode != INLINE_ALWAYS_INLINE
1252 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1254 if (dump_file)
1256 indent_to (dump_file, depth);
1257 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1259 mode = INLINE_ALL;
1262 node->aux = (void *)(size_t) mode;
1264 /* First of all look for always inline functions. */
1265 for (e = node->callees; e; e = e->next_callee)
1267 if (!e->callee->local.disregard_inline_limits
1268 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1269 continue;
1270 /* When the edge is already inlined, we just need to recurse into
1271 it in order to fully flatten the leaves. */
1272 if (!e->inline_failed && mode == INLINE_ALL)
1274 inlined |= try_inline (e, mode, depth);
1275 continue;
1277 if (dump_file)
1279 indent_to (dump_file, depth);
1280 fprintf (dump_file,
1281 "Considering to always inline inline candidate %s.\n",
1282 cgraph_node_name (e->callee));
1284 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1286 if (dump_file)
1288 indent_to (dump_file, depth);
1289 fprintf (dump_file, "Not inlining: recursive call.\n");
1291 continue;
1293 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1294 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1296 if (dump_file)
1298 indent_to (dump_file, depth);
1299 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1301 continue;
1303 if (!DECL_SAVED_TREE (e->callee->decl) && !e->callee->inline_decl)
1305 if (dump_file)
1307 indent_to (dump_file, depth);
1308 fprintf (dump_file,
1309 "Not inlining: Function body no longer available.\n");
1311 continue;
1313 inlined |= try_inline (e, mode, depth);
1316 /* Now do the automatic inlining. */
1317 if (!flag_really_no_inline && mode != INLINE_ALL
1318 && mode != INLINE_ALWAYS_INLINE)
1319 for (e = node->callees; e; e = e->next_callee)
1321 if (!e->callee->local.inlinable
1322 || !e->inline_failed
1323 || e->callee->local.disregard_inline_limits)
1324 continue;
1325 if (dump_file)
1326 fprintf (dump_file, "Considering inline candidate %s.\n",
1327 cgraph_node_name (e->callee));
1328 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1330 if (dump_file)
1332 indent_to (dump_file, depth);
1333 fprintf (dump_file, "Not inlining: recursive call.\n");
1335 continue;
1337 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1338 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1340 if (dump_file)
1342 indent_to (dump_file, depth);
1343 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1345 continue;
      /* When the function body would grow and inlining the function won't
         eliminate the need for an offline copy of the function, don't
         inline.  */
1350 if (mode == INLINE_SIZE
1351 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1352 > e->caller->global.insns)
1353 && cgraph_estimate_growth (e->callee) > 0)
1355 if (dump_file)
1357 indent_to (dump_file, depth);
1358 fprintf (dump_file,
1359 "Not inlining: code size would grow by %i insns.\n",
1360 cgraph_estimate_size_after_inlining (1, e->caller,
1361 e->callee)
1362 - e->caller->global.insns);
1364 continue;
1366 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1367 false))
1369 if (dump_file)
1371 indent_to (dump_file, depth);
1372 fprintf (dump_file, "Not inlining: %s.\n", e->inline_failed);
1374 continue;
1376 if (!DECL_SAVED_TREE (e->callee->decl) && !e->callee->inline_decl)
1378 if (dump_file)
1380 indent_to (dump_file, depth);
1381 fprintf (dump_file,
1382 "Not inlining: Function body no longer available.\n");
1384 continue;
1386 if (cgraph_default_inline_p (e->callee, &failed_reason))
1387 inlined |= try_inline (e, mode, depth);
1388 else if (!flag_unit_at_a_time)
1389 e->inline_failed = failed_reason;
1391 node->aux = (void *)(size_t) old_mode;
1392 return inlined;
1395 /* When inlining shall be performed. */
1396 static bool
1397 cgraph_gate_inlining (void)
1399 return flag_inline_trees;
1402 struct tree_opt_pass pass_ipa_inline =
1404 "inline", /* name */
1405 cgraph_gate_inlining, /* gate */
1406 cgraph_decide_inlining, /* execute */
1407 NULL, /* sub */
1408 NULL, /* next */
1409 0, /* static_pass_number */
1410 TV_INLINE_HEURISTICS, /* tv_id */
1411 0, /* properties_required */
1412 PROP_cfg, /* properties_provided */
1413 0, /* properties_destroyed */
  TODO_remove_functions,                /* todo_flags_start */
1415 TODO_dump_cgraph | TODO_dump_func
1416 | TODO_remove_functions, /* todo_flags_finish */
1417 0 /* letter */
/* Because inlining might remove no-longer-reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
1423 static int nnodes;
1424 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1426 /* Do inlining of small functions. Doing so early helps profiling and other
1427 passes to be somewhat more effective and avoids some code duplication in
1428 later real inlining pass for testcases with very many function calls. */
1429 static unsigned int
1430 cgraph_early_inlining (void)
1432 struct cgraph_node *node = cgraph_node (current_function_decl);
1433 unsigned int todo = 0;
1435 if (sorrycount || errorcount)
1436 return 0;
1437 if (cgraph_decide_inlining_incrementally (node,
1438 flag_unit_at_a_time
1439 ? INLINE_SIZE : INLINE_SPEED, 0))
1441 timevar_push (TV_INTEGRATION);
1442 todo = optimize_inline_calls (current_function_decl);
1443 timevar_pop (TV_INTEGRATION);
1445 return todo;
1448 /* When inlining shall be performed. */
1449 static bool
1450 cgraph_gate_early_inlining (void)
1452 return flag_inline_trees && flag_early_inlining;
1455 struct tree_opt_pass pass_early_inline =
1457 "einline", /* name */
1458 cgraph_gate_early_inlining, /* gate */
1459 cgraph_early_inlining, /* execute */
1460 NULL, /* sub */
1461 NULL, /* next */
1462 0, /* static_pass_number */
1463 TV_INLINE_HEURISTICS, /* tv_id */
1464 0, /* properties_required */
1465 PROP_cfg, /* properties_provided */
1466 0, /* properties_destroyed */
1467 0, /* todo_flags_start */
1468 TODO_dump_func, /* todo_flags_finish */
1469 0 /* letter */
1472 /* When inlining shall be performed. */
1473 static bool
1474 cgraph_gate_ipa_early_inlining (void)
1476 return (flag_inline_trees && flag_early_inlining
1477 && (flag_branch_probabilities || flag_test_coverage
1478 || profile_arc_flag));
1481 /* IPA pass wrapper for early inlining pass. We need to run early inlining
1482 before tree profiling so we have stand alone IPA pass for doing so. */
1483 struct tree_opt_pass pass_ipa_early_inline =
1485 "einline_ipa", /* name */
1486 cgraph_gate_ipa_early_inlining, /* gate */
1487 NULL, /* execute */
1488 NULL, /* sub */
1489 NULL, /* next */
1490 0, /* static_pass_number */
1491 TV_INLINE_HEURISTICS, /* tv_id */
1492 0, /* properties_required */
1493 PROP_cfg, /* properties_provided */
1494 0, /* properties_destroyed */
1495 0, /* todo_flags_start */
1496 TODO_dump_cgraph, /* todo_flags_finish */
1497 0 /* letter */
1500 /* Compute parameters of functions used by inliner. */
1501 static unsigned int
1502 compute_inline_parameters (void)
1504 struct cgraph_node *node = cgraph_node (current_function_decl);
1506 gcc_assert (!node->global.inlined_to);
1507 node->local.estimated_self_stack_size = estimated_stack_frame_size ();
1508 node->global.estimated_stack_size = node->local.estimated_self_stack_size;
1509 node->global.stack_frame_offset = 0;
1510 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1511 node->local.self_insns = estimate_num_insns (current_function_decl,
1512 &eni_inlining_weights);
1513 if (node->local.inlinable)
1514 node->local.disregard_inline_limits
1515 = lang_hooks.tree_inlining.disregard_inline_limits (current_function_decl);
1516 if (flag_really_no_inline && !node->local.disregard_inline_limits)
1517 node->local.inlinable = 0;
1518 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
1519 node->global.insns = node->local.self_insns;
1520 return 0;
1523 /* When inlining shall be performed. */
1524 static bool
1525 gate_inline_passes (void)
1527 return flag_inline_trees;
1530 struct tree_opt_pass pass_inline_parameters =
1532 NULL, /* name */
1533 gate_inline_passes, /* gate */
1534 compute_inline_parameters, /* execute */
1535 NULL, /* sub */
1536 NULL, /* next */
1537 0, /* static_pass_number */
1538 TV_INLINE_HEURISTICS, /* tv_id */
1539 0, /* properties_required */
1540 PROP_cfg, /* properties_provided */
1541 0, /* properties_destroyed */
1542 0, /* todo_flags_start */
1543 0, /* todo_flags_finish */
1544 0 /* letter */
1547 /* Apply inline plan to the function. */
1548 static unsigned int
1549 apply_inline (void)
1551 unsigned int todo = 0;
1552 struct cgraph_edge *e;
1553 struct cgraph_node *node = cgraph_node (current_function_decl);
  /* Even when not optimizing, ensure that always_inline functions get
     inlined.  */
1557 if (!optimize)
1558 cgraph_decide_inlining_incrementally (node, INLINE_SPEED, 0);
1560 /* We might need the body of this function so that we can expand
1561 it inline somewhere else. */
1562 if (cgraph_preserve_function_body_p (current_function_decl))
1563 save_inline_function_body (node);
1565 for (e = node->callees; e; e = e->next_callee)
1566 if (!e->inline_failed || warn_inline)
1567 break;
1568 if (e)
1570 timevar_push (TV_INTEGRATION);
1571 todo = optimize_inline_calls (current_function_decl);
1572 timevar_pop (TV_INTEGRATION);
1574 /* In non-unit-at-a-time we must mark all referenced functions as needed. */
1575 if (!flag_unit_at_a_time)
1577 struct cgraph_edge *e;
1578 for (e = node->callees; e; e = e->next_callee)
1579 if (e->callee->analyzed)
1580 cgraph_mark_needed_node (e->callee);
1582 return todo | execute_fixup_cfg ();
1585 struct tree_opt_pass pass_apply_inline =
1587 "apply_inline", /* name */
1588 NULL, /* gate */
1589 apply_inline, /* execute */
1590 NULL, /* sub */
1591 NULL, /* next */
1592 0, /* static_pass_number */
1593 TV_INLINE_HEURISTICS, /* tv_id */
1594 0, /* properties_required */
1595 PROP_cfg, /* properties_provided */
1596 0, /* properties_destroyed */
1597 0, /* todo_flags_start */
1598 TODO_dump_func | TODO_verify_flow
1599 | TODO_verify_stmts, /* todo_flags_finish */
1600 0 /* letter */
1603 #include "gt-ipa-inline.h"