gcc/ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
22 /* Inlining decision heuristics
24 We separate inlining decisions from the inliner itself and store them
25 inside the callgraph as a so-called inline plan. Refer to the cgraph.c
26 documentation for the particular representation of inline plans in the
27 callgraph.
29 There are three major parts of this file:
31 cgraph_mark_inline implementation
33 This function marks a given call inline and performs the necessary
34 modifications of the cgraph (producing clones and updating overall
35 statistics).
37 inlining heuristics limits
39 These functions check that a particular inlining is allowed
40 by the limits specified by the user (allowed function growth, overall unit
41 growth and so on).
43 inlining heuristics
45 This is the implementation of the IPA pass that aims to get as much benefit
46 from inlining as possible while obeying the limits checked above.
48 The implementation of the particular heuristics is separated from
49 the rest of the code to make it easier to replace it with a more complicated
50 implementation in the future. The rest of the inlining code acts as a
51 library aimed at modifying the callgraph and verifying that the limits
52 on code size growth are respected.
54 To mark a given call inline, use the cgraph_mark_inline function; the
55 verification is performed by cgraph_default_inline_p and
56 cgraph_check_inline_limits.
58 The heuristics implement a simple knapsack-style algorithm, ordering
59 all functions by their "profitability" (estimated by code size growth)
60 and inlining them in priority order.
62 cgraph_decide_inlining implements heuristics taking whole callgraph
63 into account, while cgraph_decide_inlining_incrementally considers
64 only one function at a time and is used in non-unit-at-a-time mode.
66 The inliner itself is split into several passes:
68 pass_inline_parameters
70 This pass computes local properties of functions that are used by the inliner:
71 estimated function body size, whether the function is inlinable at all, and
72 stack frame consumption.
74 Before executing any of the inliner passes, this local pass has to be applied
75 to each function in the callgraph (i.e. run as a subpass of some earlier
76 IPA pass). The results are made out of date by any optimization applied
77 to the function body.
79 pass_early_inlining
81 A simple local inlining pass that inlines callees into the current function.
82 This pass does no global whole-compilation-unit analysis, so when it is
83 allowed to do inlining that expands code size, it might result in unbounded
84 growth of the whole unit.
86 This is the main inlining pass in non-unit-at-a-time mode.
88 With unit-at-a-time, the pass is run during conversion into SSA form.
89 Only functions already converted into SSA form are inlined, so the
90 conversion must happen in topological order on the callgraph (which is
91 maintained by the pass manager). Functions are early-optimized after
92 inlining, so the early inliner sees the unoptimized function itself, but
93 all considered callees are already optimized, allowing it to unfold the
94 C++ abstraction penalty effectively and cheaply.
96 pass_ipa_early_inlining
98 With profiling, the early inlining is also necessary to reduce
99 instrumentation costs on programs with a high abstraction penalty (making
100 many redundant calls). This can't happen in parallel with early
101 optimization and profile instrumentation, because we would end up
102 re-instrumenting already instrumented function bodies we brought in via
103 inlining.
105 To avoid this, this pass is executed as an IPA pass before profiling. It is
106 a simple wrapper around pass_early_inlining that ensures the first inlining is done.
108 pass_ipa_inline
110 This is the main pass, implementing a simple greedy algorithm for inlining
111 of small functions that results in overall growth of the compilation unit, and
112 for inlining of functions called once. The pass computes just the so-called inline
113 plan (a representation of the inlining to be done in the callgraph) and, unlike early
114 inlining, it does not perform the inlining itself.
116 pass_apply_inline
118 This pass performs the actual inlining on a given function according to the
119 plan computed by pass_ipa_inline. The function body before inlining is possibly
120 saved when it is needed for further inlining later. */
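/* Simplified sketch of the greedy driver implemented below by
   cgraph_decide_inlining_of_small_functions (control flow condensed and
   the recursion, hotness and limit checks elided -- see the function
   itself for the real logic).  All names are the real functions defined
   later in this file:

       heap = fibheap_new ();
       for each candidate call edge E:
         E->aux = fibheap_insert (heap, cgraph_edge_badness (E), E);
       max_insns = compute_max_insns (overall_insns);
       while (overall_insns <= max_insns
              && (E = fibheap_extract_min (heap)) != NULL)
         if (cgraph_check_inline_limits (E->caller, E->callee, ...) succeeds)
           cgraph_mark_inline_edge (E, true);
           ... then re-key the affected caller and callee edges ...  */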
123 #include "config.h"
124 #include "system.h"
125 #include "coretypes.h"
126 #include "tm.h"
127 #include "tree.h"
128 #include "tree-inline.h"
129 #include "langhooks.h"
130 #include "flags.h"
131 #include "cgraph.h"
132 #include "diagnostic.h"
133 #include "timevar.h"
134 #include "params.h"
135 #include "fibheap.h"
136 #include "intl.h"
137 #include "tree-pass.h"
138 #include "hashtab.h"
139 #include "coverage.h"
140 #include "ggc.h"
141 #include "tree-flow.h"
142 #include "rtl.h"
144 /* Modes the incremental inliner operates on:
146 In ALWAYS_INLINE mode, only functions marked
147 always_inline are inlined. This mode is used after detecting a cycle during
148 flattening.
150 In SIZE mode, only functions that reduce function body size after inlining
151 are inlined; this is used during early inlining.
153 In SPEED mode, all small functions are inlined. This might result in
154 unbounded growth of the compilation unit and is used only in non-unit-at-a-time
155 mode.
157 In ALL mode, everything is inlined. This is used during flattening. */
158 enum inlining_mode {
159 INLINE_NONE = 0,
160 INLINE_ALWAYS_INLINE,
161 INLINE_SIZE,
162 INLINE_SPEED,
163 INLINE_ALL
165 static bool
166 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
167 int);
170 /* Statistics we collect about inlining algorithm. */
171 static int ncalls_inlined;
172 static int nfunctions_inlined;
173 static int overall_insns;
174 static gcov_type max_count;
176 /* Estimate size of the function after inlining WHAT into TO. */
178 static int
179 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
180 struct cgraph_node *what)
182 int size;
183 tree fndecl = what->decl, arg;
184 int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);
186 for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
187 call_insns += estimate_move_cost (TREE_TYPE (arg));
188 size = (what->global.insns - call_insns) * times + to->global.insns;
189 gcc_assert (size >= 0);
190 return size;
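/* Worked example (hypothetical numbers, not GCC defaults): if WHAT has
   40 insns and takes two arguments whose estimated move cost is 1 insn each,
   and PARAM_INLINE_CALL_COST is 16, then call_insns = 16 + 1 + 1 = 18, and
   inlining one call into a TO of 100 insns is estimated as
   (40 - 18) * 1 + 100 = 122 insns.  */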
193 /* E is expected to be an edge being inlined. Clone the destination node of
194 the edge and redirect it to the new clone.
195 DUPLICATE is used for bookkeeping on whether we are actually creating new
196 clones or re-using the node originally representing the out-of-line function call.
198 void
199 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate, bool update_original)
201 HOST_WIDE_INT peak;
202 if (duplicate)
204 /* We may eliminate the need for the out-of-line copy to be output.
205 In that case just go ahead and re-use it. */
206 if (!e->callee->callers->next_caller
207 && !e->callee->needed
208 && !cgraph_new_nodes
209 && flag_unit_at_a_time)
211 gcc_assert (!e->callee->global.inlined_to);
212 if (DECL_SAVED_TREE (e->callee->decl))
213 overall_insns -= e->callee->global.insns, nfunctions_inlined++;
214 duplicate = false;
216 else
218 struct cgraph_node *n;
219 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
220 update_original);
221 cgraph_redirect_edge_callee (e, n);
225 if (e->caller->global.inlined_to)
226 e->callee->global.inlined_to = e->caller->global.inlined_to;
227 else
228 e->callee->global.inlined_to = e->caller;
229 e->callee->global.stack_frame_offset
230 = e->caller->global.stack_frame_offset + e->caller->local.estimated_self_stack_size;
231 peak = e->callee->global.stack_frame_offset + e->callee->local.estimated_self_stack_size;
232 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
233 e->callee->global.inlined_to->global.estimated_stack_size = peak;
235 /* Recursively clone all bodies. */
236 for (e = e->callee->callees; e; e = e->next_callee)
237 if (!e->inline_failed)
238 cgraph_clone_inlined_nodes (e, duplicate, update_original);
241 /* Mark edge E as inlined and update callgraph accordingly.
242 UPDATE_ORIGINAL specifies whether the profile of the original function should
243 be updated. */
245 void
246 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
248 int old_insns = 0, new_insns = 0;
249 struct cgraph_node *to = NULL, *what;
251 if (e->callee->inline_decl)
252 cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));
254 gcc_assert (e->inline_failed);
255 e->inline_failed = NULL;
257 if (!e->callee->global.inlined && flag_unit_at_a_time)
258 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
259 e->callee->global.inlined = true;
261 cgraph_clone_inlined_nodes (e, true, update_original);
263 what = e->callee;
265 /* Now update the size of the caller and of all functions the caller is inlined into. */
266 for (;e && !e->inline_failed; e = e->caller->callers)
268 old_insns = e->caller->global.insns;
269 new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
270 what);
271 gcc_assert (new_insns >= 0);
272 to = e->caller;
273 to->global.insns = new_insns;
275 gcc_assert (what->global.inlined_to == to);
276 if (new_insns > old_insns)
277 overall_insns += new_insns - old_insns;
278 ncalls_inlined++;
281 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
282 Return the following unredirected edge in the list of callers
283 of EDGE->CALLEE. */
285 static struct cgraph_edge *
286 cgraph_mark_inline (struct cgraph_edge *edge)
288 struct cgraph_node *to = edge->caller;
289 struct cgraph_node *what = edge->callee;
290 struct cgraph_edge *e, *next;
291 int times = 0;
293 /* Look for all calls, mark them inline and clone recursively
294 all inlined functions. */
295 for (e = what->callers; e; e = next)
297 next = e->next_caller;
298 if (e->caller == to && e->inline_failed)
300 cgraph_mark_inline_edge (e, true);
301 if (e == edge)
302 edge = next;
303 times++;
306 gcc_assert (times);
307 return edge;
310 /* Estimate the growth caused by inlining NODE into all of its callers. */
312 static int
313 cgraph_estimate_growth (struct cgraph_node *node)
315 int growth = 0;
316 struct cgraph_edge *e;
317 if (node->global.estimated_growth != INT_MIN)
318 return node->global.estimated_growth;
320 for (e = node->callers; e; e = e->next_caller)
321 if (e->inline_failed)
322 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
323 - e->caller->global.insns);
325 /* ??? Wrong for self-recursive functions or cases where we decide not to
326 inline for different reasons, but it is not a big deal as in that case
327 we will keep the body around, but we will also avoid some inlining. */
328 if (!node->needed && !DECL_EXTERNAL (node->decl))
329 growth -= node->global.insns;
331 node->global.estimated_growth = growth;
332 return growth;
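/* Worked example (hypothetical numbers): a static NODE of 30 insns with
   three not-yet-inlined callers, and a per-call overhead of 18 insns as in
   the example above, grows each call site by 30 - 18 = 12 insns, giving
   growth = 3 * 12 = 36; because the offline copy can then be dropped
   (!needed && !DECL_EXTERNAL), 30 insns are subtracted, for an estimated
   overall growth of 6 insns.  */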
335 /* Return false when inlining WHAT into TO is not a good idea
336 as it would cause too large a growth of function bodies.
337 When ONE_ONLY is true, assume that only one call site is going
338 to be inlined; otherwise figure out how many call sites in
339 TO call WHAT and verify that all can be inlined.
342 static bool
343 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
344 const char **reason, bool one_only)
346 int times = 0;
347 struct cgraph_edge *e;
348 int newsize;
349 int limit;
350 HOST_WIDE_INT stack_size_limit, inlined_stack;
352 if (one_only)
353 times = 1;
354 else
355 for (e = to->callees; e; e = e->next_callee)
356 if (e->callee == what)
357 times++;
359 if (to->global.inlined_to)
360 to = to->global.inlined_to;
362 /* When inlining a large function body called once into a small function,
363 take the inlined function as the base for limiting the growth. */
364 if (to->local.self_insns > what->local.self_insns)
365 limit = to->local.self_insns;
366 else
367 limit = what->local.self_insns;
369 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
371 /* Check the size after inlining against the function limits. But allow
372 the function to shrink if it went over the limits by forced inlining. */
373 newsize = cgraph_estimate_size_after_inlining (times, to, what);
374 if (newsize >= to->global.insns
375 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
376 && newsize > limit)
378 if (reason)
379 *reason = N_("--param large-function-growth limit reached");
380 return false;
383 stack_size_limit = to->local.estimated_self_stack_size;
385 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
387 inlined_stack = (to->global.stack_frame_offset
388 + to->local.estimated_self_stack_size
389 + what->global.estimated_stack_size);
390 if (inlined_stack > stack_size_limit
391 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
393 if (reason)
394 *reason = N_("--param large-stack-frame-growth limit reached");
395 return false;
397 return true;
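/* Worked example (illustrative parameter values, not defaults): with TO at
   500 self insns, WHAT at 80, and --param large-function-growth=100,
   limit = 500 + 500 * 100 / 100 = 1000.  If inlining WHAT at many call
   sites yields a newsize of 1100, that is no smaller than TO's current
   size and exceeds both an illustrative --param large-function-insns of
   1000 and the limit, so the inlining is rejected.  The stack check is
   analogous: TO's own frame is scaled by --param stack-frame-growth and
   the combined frame is compared against --param large-stack-frame.  */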
400 /* Return true when function N is small enough to be inlined. */
402 bool
403 cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
405 tree decl = n->decl;
407 if (n->inline_decl)
408 decl = n->inline_decl;
409 if (!DECL_INLINE (decl))
411 if (reason)
412 *reason = N_("function not inlinable");
413 return false;
416 if (!DECL_STRUCT_FUNCTION (decl)->cfg)
418 if (reason)
419 *reason = N_("function body not available");
420 return false;
423 if (DECL_DECLARED_INLINE_P (decl))
425 if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
427 if (reason)
428 *reason = N_("--param max-inline-insns-single limit reached");
429 return false;
432 else
434 if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
436 if (reason)
437 *reason = N_("--param max-inline-insns-auto limit reached");
438 return false;
442 return true;
445 /* Return true when inlining WHAT would create recursive inlining.
446 We call recursive inlining all cases where the same function appears more than
447 once in a single recursion nest path in the inline graph. */
449 static bool
450 cgraph_recursive_inlining_p (struct cgraph_node *to,
451 struct cgraph_node *what,
452 const char **reason)
454 bool recursive;
455 if (to->global.inlined_to)
456 recursive = what->decl == to->global.inlined_to->decl;
457 else
458 recursive = what->decl == to->decl;
459 /* Marking a recursive function inline has sane semantics and thus we should
460 not warn about it. */
461 if (recursive && reason)
462 *reason = (what->local.disregard_inline_limits
463 ? N_("recursive inlining") : "");
464 return recursive;
467 /* Return true if the call can be hot. */
468 static bool
469 cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
471 if (profile_info && flag_branch_probabilities
472 && (edge->count
473 <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
474 return false;
475 if (lookup_attribute ("cold", DECL_ATTRIBUTES (edge->callee->decl))
476 || lookup_attribute ("cold", DECL_ATTRIBUTES (edge->caller->decl)))
477 return false;
478 if (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl)))
479 return true;
480 if (flag_guess_branch_prob
481 && edge->frequency < (CGRAPH_FREQ_MAX
482 / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
483 return false;
484 return true;
487 /* A cost model driving the inlining heuristics so that the edges with the
488 smallest badness are inlined first. After each inlining is performed,
489 the costs of all caller edges of the affected nodes are recomputed so that the
490 metrics may accurately depend on values such as the number of inlinable callers
491 of the function or the function body size. */
493 static int
494 cgraph_edge_badness (struct cgraph_edge *edge)
496 int badness;
497 int growth =
498 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
500 growth -= edge->caller->global.insns;
502 /* Always prefer inlining saving code size. */
503 if (growth <= 0)
504 badness = INT_MIN - growth;
506 /* When profiling is available, base priorities on -(#calls / growth),
507 so we optimize for the overall number of "executed" inlined calls. */
508 else if (max_count)
509 badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;
511 /* When a function-local profile is available, base priorities on
512 growth / frequency, so we optimize for the overall frequency of inlined
513 calls. This is not too accurate since, while the call might be frequent
514 within the function, the function itself may be infrequent.
516 Another objective to optimize for is the number of different calls inlined.
517 We add the estimated growth from inlining the callee everywhere to bias the
518 priorities slightly in this direction (so functions of the same size that
519 are called fewer times get priority). */
520 else if (flag_guess_branch_prob)
522 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
523 int growth =
524 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
525 growth -= edge->caller->global.insns;
526 badness = growth * 256;
528 /* Decrease badness if call is nested. */
529 /* Compress the range so we don't overflow. */
530 if (div > 256)
531 div = 256 + ceil_log2 (div) - 8;
532 if (div < 1)
533 div = 1;
534 if (badness > 0)
535 badness /= div;
536 badness += cgraph_estimate_growth (edge->callee);
538 /* When a function-local profile is not available or it does not give
539 useful information (i.e. the frequency is zero), base the cost on
540 loop nest and overall size growth, so we optimize for the overall number
541 of functions fully inlined in the program. */
542 else
544 int nest = MIN (edge->loop_nest, 8);
545 badness = cgraph_estimate_growth (edge->callee) * 256;
547 /* Decrease badness if call is nested. */
548 if (badness > 0)
549 badness >>= nest;
550 else
552 badness <<= nest;
555 /* Make recursive inlining always happen after other inlining is done. */
556 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
557 return badness + 1;
558 else
559 return badness;
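/* Worked example (hypothetical numbers) for the guessed-profile branch
   above: a growth of 20 insns and edge->frequency at half of
   CGRAPH_FREQ_BASE give div = 50, so badness = 20 * 256 / 50 = 102, to
   which the callee's overall estimated growth is added; a cheaper or more
   frequent call therefore gets a smaller badness and is popped from the
   heap earlier.  */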
562 /* Recompute heap nodes for each caller edge of NODE. */
564 static void
565 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
566 bitmap updated_nodes)
568 struct cgraph_edge *edge;
569 const char *failed_reason;
571 if (!node->local.inlinable || node->local.disregard_inline_limits
572 || node->global.inlined_to)
573 return;
574 if (bitmap_bit_p (updated_nodes, node->uid))
575 return;
576 bitmap_set_bit (updated_nodes, node->uid);
577 node->global.estimated_growth = INT_MIN;
579 if (!node->local.inlinable)
580 return;
581 /* Prune out edges we won't inline into anymore. */
582 if (!cgraph_default_inline_p (node, &failed_reason))
584 for (edge = node->callers; edge; edge = edge->next_caller)
585 if (edge->aux)
587 fibheap_delete_node (heap, edge->aux);
588 edge->aux = NULL;
589 if (edge->inline_failed)
590 edge->inline_failed = failed_reason;
592 return;
595 for (edge = node->callers; edge; edge = edge->next_caller)
596 if (edge->inline_failed)
598 int badness = cgraph_edge_badness (edge);
599 if (edge->aux)
601 fibnode_t n = edge->aux;
602 gcc_assert (n->data == edge);
603 if (n->key == badness)
604 continue;
606 /* fibheap_replace_key only handles decreasing the key; on an increase we delete the node and re-insert below. */
607 if (fibheap_replace_key (heap, n, badness))
608 continue;
609 fibheap_delete_node (heap, edge->aux);
611 edge->aux = fibheap_insert (heap, badness, edge);
615 /* Recompute heap nodes for the caller edges of each of NODE's callees. */
617 static void
618 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
619 bitmap updated_nodes)
621 struct cgraph_edge *e;
622 node->global.estimated_growth = INT_MIN;
624 for (e = node->callees; e; e = e->next_callee)
625 if (e->inline_failed)
626 update_caller_keys (heap, e->callee, updated_nodes);
627 else if (!e->inline_failed)
628 update_callee_keys (heap, e->callee, updated_nodes);
631 /* Enqueue all recursive calls from NODE into the priority queue, ordered by
632 how much we want to recursively inline the call. */
634 static void
635 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
636 fibheap_t heap)
638 static int priority;
639 struct cgraph_edge *e;
640 for (e = where->callees; e; e = e->next_callee)
641 if (e->callee == node)
643 /* When profile feedback is available, prioritize by expected number
644 of calls. Without profile feedback we maintain a simple queue
645 to order candidates by recursion depth. */
646 fibheap_insert (heap,
647 !max_count ? priority++
648 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
651 for (e = where->callees; e; e = e->next_callee)
652 if (!e->inline_failed)
653 lookup_recursive_calls (node, e->callee, heap);
656 /* Decide on recursive inlining: in case the function has recursive calls,
657 inline until the body size reaches the given limit. */
659 static bool
660 cgraph_decide_recursive_inlining (struct cgraph_node *node)
662 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
663 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
664 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
665 fibheap_t heap;
666 struct cgraph_edge *e;
667 struct cgraph_node *master_clone, *next;
668 int depth = 0;
669 int n = 0;
671 if (optimize_size)
672 return false;
674 if (DECL_DECLARED_INLINE_P (node->decl))
676 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
677 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
680 /* Make sure that function is small enough to be considered for inlining. */
681 if (!max_depth
682 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
683 return false;
684 heap = fibheap_new ();
685 lookup_recursive_calls (node, node, heap);
686 if (fibheap_empty (heap))
688 fibheap_delete (heap);
689 return false;
692 if (dump_file)
693 fprintf (dump_file,
694 " Performing recursive inlining on %s\n",
695 cgraph_node_name (node));
697 /* We need original clone to copy around. */
698 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
699 master_clone->needed = true;
700 for (e = master_clone->callees; e; e = e->next_callee)
701 if (!e->inline_failed)
702 cgraph_clone_inlined_nodes (e, true, false);
704 /* Do the inlining and update the list of recursive calls during the process. */
705 while (!fibheap_empty (heap)
706 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
707 <= limit))
709 struct cgraph_edge *curr = fibheap_extract_min (heap);
710 struct cgraph_node *cnode;
712 depth = 1;
713 for (cnode = curr->caller;
714 cnode->global.inlined_to; cnode = cnode->callers->caller)
715 if (node->decl == curr->callee->decl)
716 depth++;
717 if (depth > max_depth)
719 if (dump_file)
720 fprintf (dump_file,
721 " maxmal depth reached\n");
722 continue;
725 if (max_count)
727 if (!cgraph_maybe_hot_edge_p (curr))
729 if (dump_file)
730 fprintf (dump_file, " Not inlining cold call\n");
731 continue;
733 if (curr->count * 100 / node->count < probability)
735 if (dump_file)
736 fprintf (dump_file,
737 " Probability of edge is too small\n");
738 continue;
742 if (dump_file)
744 fprintf (dump_file,
745 " Inlining call of depth %i", depth);
746 if (node->count)
748 fprintf (dump_file, " called approx. %.2f times per call",
749 (double)curr->count / node->count);
751 fprintf (dump_file, "\n");
753 cgraph_redirect_edge_callee (curr, master_clone);
754 cgraph_mark_inline_edge (curr, false);
755 lookup_recursive_calls (node, curr->callee, heap);
756 n++;
758 if (!fibheap_empty (heap) && dump_file)
759 fprintf (dump_file, " Recursive inlining growth limit met.\n");
761 fibheap_delete (heap);
762 if (dump_file)
763 fprintf (dump_file,
764 "\n Inlined %i times, body grown from %i to %i insns\n", n,
765 master_clone->global.insns, node->global.insns);
767 /* Remove the master clone we used for inlining. We rely on the fact that clones
768 inlined into the master clone get queued just before the master clone, so we
769 don't need recursion. */
770 for (node = cgraph_nodes; node != master_clone;
771 node = next)
773 next = node->next;
774 if (node->global.inlined_to == master_clone)
775 cgraph_remove_node (node);
777 cgraph_remove_node (master_clone);
778 /* FIXME: Recursive inlining actually reduces the number of calls to the
779 function. At this point we should probably walk the function and
780 inline clones and compensate the counts accordingly. This probably
781 doesn't matter much in practice. */
782 return n > 0;
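/* Worked example (illustrative): with profile feedback and
   --param min-inline-recursive-probability=10, a recursive edge executed
   5 times while the function itself is entered 100 times gives
   5 * 100 / 100 = 5 < 10, so that candidate is skipped as too unlikely.  */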
785 /* Set inline_failed for all callers of given function to REASON. */
787 static void
788 cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
790 struct cgraph_edge *e;
792 if (dump_file)
793 fprintf (dump_file, "Inlining failed: %s\n", reason);
794 for (e = node->callers; e; e = e->next_caller)
795 if (e->inline_failed)
796 e->inline_failed = reason;
799 /* Given the whole-compilation-unit estimate of INSNS, compute how large we can
800 allow the unit to grow. */
801 static int
802 compute_max_insns (int insns)
804 int max_insns = insns;
805 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
806 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
808 return ((HOST_WIDEST_INT) max_insns
809 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
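/* Worked example (illustrative parameter value): with a unit of 20000 insns
   and --param inline-unit-growth=30, the unit may grow to
   20000 * (100 + 30) / 100 = 26000 insns; units smaller than
   --param large-unit-insns are first rounded up to that size so tiny
   programs are not starved of inlining.  */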
812 /* We use a greedy algorithm for inlining of small functions:
813 all inline candidates are put into a prioritized heap based on the estimated
814 growth of the overall number of instructions, and the estimates are then updated.
816 INLINED and INLINED_CALLEES are just pointers to arrays large enough
817 to be passed to cgraph_inlined_into and cgraph_inlined_callees. */
819 static void
820 cgraph_decide_inlining_of_small_functions (void)
822 struct cgraph_node *node;
823 struct cgraph_edge *edge;
824 const char *failed_reason;
825 fibheap_t heap = fibheap_new ();
826 bitmap updated_nodes = BITMAP_ALLOC (NULL);
827 int min_insns, max_insns;
829 if (dump_file)
830 fprintf (dump_file, "\nDeciding on smaller functions:\n");
832 /* Put all inline candidates into the heap. */
834 for (node = cgraph_nodes; node; node = node->next)
836 if (!node->local.inlinable || !node->callers
837 || node->local.disregard_inline_limits)
838 continue;
839 if (dump_file)
840 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
842 node->global.estimated_growth = INT_MIN;
843 if (!cgraph_default_inline_p (node, &failed_reason))
845 cgraph_set_inline_failed (node, failed_reason);
846 continue;
849 for (edge = node->callers; edge; edge = edge->next_caller)
850 if (edge->inline_failed)
852 gcc_assert (!edge->aux);
853 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
857 max_insns = compute_max_insns (overall_insns);
858 min_insns = overall_insns;
860 while (overall_insns <= max_insns && (edge = fibheap_extract_min (heap)))
862 int old_insns = overall_insns;
863 struct cgraph_node *where;
864 int growth =
865 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
867 growth -= edge->caller->global.insns;
869 if (dump_file)
871 fprintf (dump_file,
872 "\nConsidering %s with %i insns\n",
873 cgraph_node_name (edge->callee),
874 edge->callee->global.insns);
875 fprintf (dump_file,
876 " to be inlined into %s\n"
877 " Estimated growth after inlined into all callees is %+i insns.\n"
878 " Estimated badness is %i, frequency %.2f.\n",
879 cgraph_node_name (edge->caller),
880 cgraph_estimate_growth (edge->callee),
881 cgraph_edge_badness (edge),
882 edge->frequency / (double)CGRAPH_FREQ_BASE);
883 if (edge->count)
884 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
886 gcc_assert (edge->aux);
887 edge->aux = NULL;
888 if (!edge->inline_failed)
889 continue;
891 /* When profile info is not available we do not weight in any way the
892 position of the call in the procedure itself. This means that if a call of
893 function A from function B seems profitable to inline, the recursive
894 call of function A in the inline copy of A in B will look profitable too,
895 and we end up inlining until we reach the maximal function growth. This
896 is not a good idea, so prohibit the recursive inlining.
898 ??? When the frequencies are taken into account we might not need this
899 restriction. */
900 if (!max_count)
902 where = edge->caller;
903 while (where->global.inlined_to)
905 if (where->decl == edge->callee->decl)
906 break;
907 where = where->callers->caller;
909 if (where->global.inlined_to)
911 edge->inline_failed
912 = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
913 if (dump_file)
914 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
915 continue;
919 if ((!cgraph_maybe_hot_edge_p (edge) || optimize_size) && growth > 0)
921 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
922 &edge->inline_failed))
924 edge->inline_failed =
925 N_("call is unlikely");
926 if (dump_file)
927 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
929 continue;
931 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
933 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
934 &edge->inline_failed))
936 if (dump_file)
937 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
939 continue;
941 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
942 &edge->inline_failed))
944 where = edge->caller;
945 if (where->global.inlined_to)
946 where = where->global.inlined_to;
947 if (!cgraph_decide_recursive_inlining (where))
948 continue;
949 update_callee_keys (heap, where, updated_nodes);
951 else
953 struct cgraph_node *callee;
954 if (!cgraph_check_inline_limits (edge->caller, edge->callee,
955 &edge->inline_failed, true))
957 if (dump_file)
958 fprintf (dump_file, " Not inlining into %s:%s.\n",
959 cgraph_node_name (edge->caller), edge->inline_failed);
960 continue;
962 callee = edge->callee;
963 cgraph_mark_inline_edge (edge, true);
964 update_callee_keys (heap, callee, updated_nodes);
966 where = edge->caller;
967 if (where->global.inlined_to)
968 where = where->global.inlined_to;
970 /* Our profitability metric can depend on local properties
971 such as the number of inlinable calls and the size of the function body.
972 After inlining these properties might change for the function we
973 inlined into (since its body size changed) and for the functions
974 called by the function we inlined (since the number of their inlinable callers
975 might change). */
976 update_caller_keys (heap, where, updated_nodes);
977 bitmap_clear (updated_nodes);
979 if (dump_file)
981 fprintf (dump_file,
982 " Inlined into %s which now has %i insns,"
983 "net change of %+i insns.\n",
984 cgraph_node_name (edge->caller),
985 edge->caller->global.insns,
986 overall_insns - old_insns);
988 if (min_insns > overall_insns)
990 min_insns = overall_insns;
991 max_insns = compute_max_insns (min_insns);
993 if (dump_file)
994 fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
997 while ((edge = fibheap_extract_min (heap)) != NULL)
999 gcc_assert (edge->aux);
1000 edge->aux = NULL;
1001 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1002 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1003 &edge->inline_failed))
1004 edge->inline_failed = N_("--param inline-unit-growth limit reached");
1006 fibheap_delete (heap);
1007 BITMAP_FREE (updated_nodes);
1010 /* Decide on the inlining. We do so in topological order to avoid
1011 the expense of updating data structures. */
1013 static unsigned int
1014 cgraph_decide_inlining (void)
1016 struct cgraph_node *node;
1017 int nnodes;
1018 struct cgraph_node **order =
1019 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1020 int old_insns = 0;
1021 int i;
1022 int initial_insns = 0;
1024 max_count = 0;
1025 for (node = cgraph_nodes; node; node = node->next)
1026 if (node->analyzed && (node->needed || node->reachable))
1028 struct cgraph_edge *e;
1030 initial_insns += node->local.self_insns;
1031 gcc_assert (node->local.self_insns == node->global.insns);
1032 for (e = node->callees; e; e = e->next_callee)
1033 if (max_count < e->count)
1034 max_count = e->count;
1036 overall_insns = initial_insns;
1037 gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
1039 nnodes = cgraph_postorder (order);
1041 if (dump_file)
1042 fprintf (dump_file,
1043 "\nDeciding on inlining. Starting with %i insns.\n",
1044 initial_insns);
1046 for (node = cgraph_nodes; node; node = node->next)
1047 node->aux = 0;
1049 if (dump_file)
1050 fprintf (dump_file, "\nInlining always_inline functions:\n");
1052 /* In the first pass mark all always_inline edges. Do this with a priority
1053 so none of our later choices will make this impossible. */
1054 for (i = nnodes - 1; i >= 0; i--)
1056 struct cgraph_edge *e, *next;
1058 node = order[i];
1060 /* Handle nodes to be flattened, but don't update overall unit size. */
1061 if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1063 if (dump_file)
1064 fprintf (dump_file,
1065 "Flattening %s\n", cgraph_node_name (node));
1066 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1069 if (!node->local.disregard_inline_limits)
1070 continue;
1071 if (dump_file)
1072 fprintf (dump_file,
1073 "\nConsidering %s %i insns (always inline)\n",
1074 cgraph_node_name (node), node->global.insns);
1075 old_insns = overall_insns;
1076 for (e = node->callers; e; e = next)
1078 next = e->next_caller;
1079 if (!e->inline_failed)
1080 continue;
1081 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1082 &e->inline_failed))
1083 continue;
1084 cgraph_mark_inline_edge (e, true);
1085 if (dump_file)
1086 fprintf (dump_file,
1087 " Inlined into %s which now has %i insns.\n",
1088 cgraph_node_name (e->caller),
1089 e->caller->global.insns);
1091 /* Inlining a self-recursive function might introduce new calls to
1092 itself that we didn't see in the loop above. Fill in the proper
1093 reason why the inlining failed. */
1094 for (e = node->callers; e; e = e->next_caller)
1095 if (e->inline_failed)
1096 e->inline_failed = N_("recursive inlining");
1097 if (dump_file)
1098 fprintf (dump_file,
1099 " Inlined for a net change of %+i insns.\n",
1100 overall_insns - old_insns);
1103 if (!flag_really_no_inline)
1104 cgraph_decide_inlining_of_small_functions ();
1106 if (!flag_really_no_inline
1107 && flag_inline_functions_called_once)
1109 if (dump_file)
1110 fprintf (dump_file, "\nDeciding on functions called once:\n");
1112 /* And finally decide what functions are called once. */
1114 for (i = nnodes - 1; i >= 0; i--)
1116 node = order[i];
1118 if (node->callers && !node->callers->next_caller && !node->needed
1119 && node->local.inlinable && node->callers->inline_failed
1120 && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
1122 if (dump_file)
1124 fprintf (dump_file,
1125 "\nConsidering %s %i insns.\n",
1126 cgraph_node_name (node), node->global.insns);
1127 fprintf (dump_file,
1128 " Called once from %s %i insns.\n",
1129 cgraph_node_name (node->callers->caller),
1130 node->callers->caller->global.insns);
1133 old_insns = overall_insns;
1135 if (cgraph_check_inline_limits (node->callers->caller, node,
1136 NULL, false))
1138 cgraph_mark_inline (node->callers);
1139 if (dump_file)
1140 fprintf (dump_file,
1141 " Inlined into %s which now has %i insns"
1142 " for a net change of %+i insns.\n",
1143 cgraph_node_name (node->callers->caller),
1144 node->callers->caller->global.insns,
1145 overall_insns - old_insns);
1147 else
1149 if (dump_file)
1150 fprintf (dump_file,
1151 " Inline limit reached, not inlined.\n");
1157 if (dump_file)
1158 fprintf (dump_file,
1159 "\nInlined %i calls, eliminated %i functions, "
1160 "%i insns turned to %i insns.\n\n",
1161 ncalls_inlined, nfunctions_inlined, initial_insns,
1162 overall_insns);
1163 free (order);
1164 return 0;
1167 /* Try to inline edge E from the incremental inliner. MODE specifies the mode
1168 of the inliner.
1170 We detect cycles by storing the mode of the inliner into the cgraph_node the last
1171 time we visited it in the recursion. In general when the mode is set, we have
1172 recursive inlining, but as a special case, we want to try harder to inline
1173 ALWAYS_INLINE functions: consider the callgraph a->b->c->b, with a being
1174 flattened, b being always_inline. Flattening 'a' will collapse
1175 a->b->c before hitting the cycle. To accommodate always_inline, we however
1176 need to inline a->b->c->b.
1178 So after hitting the cycle the first time, we switch into ALWAYS_INLINE mode and
1179 stop inlining only after hitting always_inline in ALWAYS_INLINE mode. */
1180 static bool
1181 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1183 struct cgraph_node *callee = e->callee;
1184 enum inlining_mode callee_mode = (size_t) callee->aux;
1185 bool always_inline = e->callee->local.disregard_inline_limits;
1187 /* Have we hit a cycle? */
1188 if (callee_mode)
1190 /* It is the first time we see it, we are not in ALWAYS_INLINE-only
1191 mode yet, and the function in question is always_inline. */
1192 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1194 if (dump_file)
1196 indent_to (dump_file, depth);
1197 fprintf (dump_file,
1198 "Hit cycle in %s, switching to always inline only.\n",
1199 cgraph_node_name (callee));
1201 mode = INLINE_ALWAYS_INLINE;
1203 /* Otherwise it is time to give up. */
1204 else
1206 if (dump_file)
1208 indent_to (dump_file, depth);
1209 fprintf (dump_file,
1210 "Not inlining %s into %s to avoid cycle.\n",
1211 cgraph_node_name (callee),
1212 cgraph_node_name (e->caller));
1214 e->inline_failed = (e->callee->local.disregard_inline_limits
1215 ? N_("recursive inlining") : "");
1216 return false;
1220 callee->aux = (void *)(size_t) mode;
1221 if (dump_file)
1223 indent_to (dump_file, depth);
1224 fprintf (dump_file, " Inlining %s into %s.\n",
1225 cgraph_node_name (e->callee),
1226 cgraph_node_name (e->caller));
1228 if (e->inline_failed)
1229 cgraph_mark_inline (e);
1231 /* In order to fully inline always_inline functions at -O0, we need to
1232 recurse here, since the inlined functions might not be processed by
1233 incremental inlining at all yet.
1235 Also flattening needs to be done recursively. */
1237 if (!flag_unit_at_a_time || mode == INLINE_ALL || always_inline)
1238 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1239 callee->aux = (void *)(size_t) callee_mode;
1240 return true;
1243 /* Decide on the inlining. We do so in topological order to avoid
1244 the expense of updating data structures.
1245 DEPTH is the depth of recursion, used only for debug output. */
1247 static bool
1248 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1249 enum inlining_mode mode,
1250 int depth)
1252 struct cgraph_edge *e;
1253 bool inlined = false;
1254 const char *failed_reason;
1255 enum inlining_mode old_mode;
1257 #ifdef ENABLE_CHECKING
1258 verify_cgraph_node (node);
1259 #endif
1261 old_mode = (size_t)node->aux;
1263 if (mode != INLINE_ALWAYS_INLINE
1264 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1266 if (dump_file)
1268 indent_to (dump_file, depth);
1269 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1271 mode = INLINE_ALL;
1274 node->aux = (void *)(size_t) mode;
1276 /* First of all look for always inline functions. */
1277 for (e = node->callees; e; e = e->next_callee)
1279 if (!e->callee->local.disregard_inline_limits
1280 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1281 continue;
1282 /* When the edge is already inlined, we just need to recurse into
1283 it in order to fully flatten the leaves. */
1284 if (!e->inline_failed && mode == INLINE_ALL)
1286 inlined |= try_inline (e, mode, depth);
1287 continue;
1289 if (dump_file)
1291 indent_to (dump_file, depth);
1292 fprintf (dump_file,
1293 "Considering to always inline inline candidate %s.\n",
1294 cgraph_node_name (e->callee));
1296 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1298 if (dump_file)
1300 indent_to (dump_file, depth);
1301 fprintf (dump_file, "Not inlining: recursive call.\n");
1303 continue;
1305 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1306 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1308 if (dump_file)
1310 indent_to (dump_file, depth);
1311 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1313 continue;
1315 if (!DECL_SAVED_TREE (e->callee->decl) && !e->callee->inline_decl)
1317 if (dump_file)
1319 indent_to (dump_file, depth);
1320 fprintf (dump_file,
1321 "Not inlining: Function body no longer available.\n");
1323 continue;
1325 inlined |= try_inline (e, mode, depth);
1328 /* Now do the automatic inlining. */
1329 if (!flag_really_no_inline && mode != INLINE_ALL
1330 && mode != INLINE_ALWAYS_INLINE)
1331 for (e = node->callees; e; e = e->next_callee)
1333 if (!e->callee->local.inlinable
1334 || !e->inline_failed
1335 || e->callee->local.disregard_inline_limits)
1336 continue;
1337 if (dump_file)
1338 fprintf (dump_file, "Considering inline candidate %s.\n",
1339 cgraph_node_name (e->callee));
1340 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1342 if (dump_file)
1344 indent_to (dump_file, depth);
1345 fprintf (dump_file, "Not inlining: recursive call.\n");
1347 continue;
1349 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1350 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1352 if (dump_file)
1354 indent_to (dump_file, depth);
1355 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1357 continue;
1359 /* When the function body would grow and inlining the function won't
1360 eliminate the need for the offline copy of the function, don't inline. */
1362 if (mode == INLINE_SIZE
1363 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1364 > e->caller->global.insns)
1365 && cgraph_estimate_growth (e->callee) > 0)
1367 if (dump_file)
1369 indent_to (dump_file, depth);
1370 fprintf (dump_file,
1371 "Not inlining: code size would grow by %i insns.\n",
1372 cgraph_estimate_size_after_inlining (1, e->caller,
1373 e->callee)
1374 - e->caller->global.insns);
1376 continue;
1378 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1379 false))
1381 if (dump_file)
1383 indent_to (dump_file, depth);
1384 fprintf (dump_file, "Not inlining: %s.\n", e->inline_failed);
1386 continue;
1388 if (!DECL_SAVED_TREE (e->callee->decl) && !e->callee->inline_decl)
1390 if (dump_file)
1392 indent_to (dump_file, depth);
1393 fprintf (dump_file,
1394 "Not inlining: Function body no longer available.\n");
1396 continue;
1398 if (cgraph_default_inline_p (e->callee, &failed_reason))
1399 inlined |= try_inline (e, mode, depth);
1400 else if (!flag_unit_at_a_time)
1401 e->inline_failed = failed_reason;
1403 node->aux = (void *)(size_t) old_mode;
1404 return inlined;
1407 /* When inlining shall be performed. */
1408 static bool
1409 cgraph_gate_inlining (void)
1411 return flag_inline_trees;
1414 struct tree_opt_pass pass_ipa_inline =
1416 "inline", /* name */
1417 cgraph_gate_inlining, /* gate */
1418 cgraph_decide_inlining, /* execute */
1419 NULL, /* sub */
1420 NULL, /* next */
1421 0, /* static_pass_number */
1422 TV_INLINE_HEURISTICS, /* tv_id */
1423 0, /* properties_required */
1424 PROP_cfg, /* properties_provided */
1425 0, /* properties_destroyed */
1426 TODO_remove_functions, /* todo_flags_start */
1427 TODO_dump_cgraph | TODO_dump_func
1428 | TODO_remove_functions, /* todo_flags_finish */
1429 0 /* letter */
1432 /* Because inlining might remove no-longer reachable nodes, we need to
1433 keep the array visible to the garbage collector to avoid reading
1434 collected-out nodes. */
1435 static int nnodes;
1436 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1438 /* Do inlining of small functions. Doing so early helps profiling and other
1439 passes to be somewhat more effective and avoids some code duplication in
1440 the later real inlining pass for testcases with very many function calls. */
1441 static unsigned int
1442 cgraph_early_inlining (void)
1444 struct cgraph_node *node = cgraph_node (current_function_decl);
1445 unsigned int todo = 0;
1447 if (sorrycount || errorcount)
1448 return 0;
1449 if (cgraph_decide_inlining_incrementally (node,
1450 flag_unit_at_a_time || optimize_size
1451 ? INLINE_SIZE : INLINE_SPEED, 0))
1453 timevar_push (TV_INTEGRATION);
1454 todo = optimize_inline_calls (current_function_decl);
1455 timevar_pop (TV_INTEGRATION);
1457 return todo;
1460 /* When inlining shall be performed. */
1461 static bool
1462 cgraph_gate_early_inlining (void)
1464 return flag_inline_trees && flag_early_inlining;
1467 struct tree_opt_pass pass_early_inline =
1469 "einline", /* name */
1470 cgraph_gate_early_inlining, /* gate */
1471 cgraph_early_inlining, /* execute */
1472 NULL, /* sub */
1473 NULL, /* next */
1474 0, /* static_pass_number */
1475 TV_INLINE_HEURISTICS, /* tv_id */
1476 0, /* properties_required */
1477 PROP_cfg, /* properties_provided */
1478 0, /* properties_destroyed */
1479 0, /* todo_flags_start */
1480 TODO_dump_func, /* todo_flags_finish */
1481 0 /* letter */
1484 /* When inlining shall be performed. */
1485 static bool
1486 cgraph_gate_ipa_early_inlining (void)
1488 return (flag_inline_trees && flag_early_inlining
1489 && (flag_branch_probabilities || flag_test_coverage
1490 || profile_arc_flag));
1493 /* IPA pass wrapper for early inlining pass. We need to run early inlining
1494 before tree profiling, so we have a stand-alone IPA pass for doing so. */
1495 struct tree_opt_pass pass_ipa_early_inline =
1497 "einline_ipa", /* name */
1498 cgraph_gate_ipa_early_inlining, /* gate */
1499 NULL, /* execute */
1500 NULL, /* sub */
1501 NULL, /* next */
1502 0, /* static_pass_number */
1503 TV_INLINE_HEURISTICS, /* tv_id */
1504 0, /* properties_required */
1505 PROP_cfg, /* properties_provided */
1506 0, /* properties_destroyed */
1507 0, /* todo_flags_start */
1508 TODO_dump_cgraph, /* todo_flags_finish */
1509 0 /* letter */
1512 /* Compute the per-function parameters used by the inliner. */
1513 static unsigned int
1514 compute_inline_parameters (void)
1516 struct cgraph_node *node = cgraph_node (current_function_decl);
1518 gcc_assert (!node->global.inlined_to);
1519 node->local.estimated_self_stack_size = estimated_stack_frame_size ();
1520 node->global.estimated_stack_size = node->local.estimated_self_stack_size;
1521 node->global.stack_frame_offset = 0;
1522 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1523 node->local.self_insns = estimate_num_insns (current_function_decl,
1524 &eni_inlining_weights);
1525 if (node->local.inlinable)
1526 node->local.disregard_inline_limits
1527 = lang_hooks.tree_inlining.disregard_inline_limits (current_function_decl);
1528 if (flag_really_no_inline && !node->local.disregard_inline_limits)
1529 node->local.inlinable = 0;
1530 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
1531 node->global.insns = node->local.self_insns;
1532 return 0;
1535 /* When inlining shall be performed. */
1536 static bool
1537 gate_inline_passes (void)
1539 return flag_inline_trees;
1542 struct tree_opt_pass pass_inline_parameters =
1544 NULL, /* name */
1545 gate_inline_passes, /* gate */
1546 compute_inline_parameters, /* execute */
1547 NULL, /* sub */
1548 NULL, /* next */
1549 0, /* static_pass_number */
1550 TV_INLINE_HEURISTICS, /* tv_id */
1551 0, /* properties_required */
1552 PROP_cfg, /* properties_provided */
1553 0, /* properties_destroyed */
1554 0, /* todo_flags_start */
1555 0, /* todo_flags_finish */
1556 0 /* letter */
1559 /* Apply inline plan to the function. */
1560 static unsigned int
1561 apply_inline (void)
1563 unsigned int todo = 0;
1564 struct cgraph_edge *e;
1565 struct cgraph_node *node = cgraph_node (current_function_decl);
1567 /* Even when not optimizing, ensure that always_inline functions get inlined. */
1569 if (!optimize)
1570 cgraph_decide_inlining_incrementally (node, INLINE_SPEED, 0);
1572 /* We might need the body of this function so that we can expand
1573 it inline somewhere else. */
1574 if (cgraph_preserve_function_body_p (current_function_decl))
1575 save_inline_function_body (node);
1577 for (e = node->callees; e; e = e->next_callee)
1578 if (!e->inline_failed || warn_inline)
1579 break;
1580 if (e)
1582 timevar_push (TV_INTEGRATION);
1583 todo = optimize_inline_calls (current_function_decl);
1584 timevar_pop (TV_INTEGRATION);
1586 /* In non-unit-at-a-time we must mark all referenced functions as needed. */
1587 if (!flag_unit_at_a_time)
1589 struct cgraph_edge *e;
1590 for (e = node->callees; e; e = e->next_callee)
1591 if (e->callee->analyzed)
1592 cgraph_mark_needed_node (e->callee);
1594 return todo | execute_fixup_cfg ();
1597 struct tree_opt_pass pass_apply_inline =
1599 "apply_inline", /* name */
1600 NULL, /* gate */
1601 apply_inline, /* execute */
1602 NULL, /* sub */
1603 NULL, /* next */
1604 0, /* static_pass_number */
1605 TV_INLINE_HEURISTICS, /* tv_id */
1606 0, /* properties_required */
1607 PROP_cfg, /* properties_provided */
1608 0, /* properties_destroyed */
1609 0, /* todo_flags_start */
1610 TODO_dump_func | TODO_verify_flow
1611 | TODO_verify_stmts, /* todo_flags_finish */
1612 0 /* letter */
1615 #include "gt-ipa-inline.h"