/* Inlining decision heuristics.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation for the particular representation of inline plans in the
   callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks a given call inline and performs the necessary
     modifications of the cgraph (production of the clones and updating of
     the overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass aiming to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of the particular heuristics is separated from the
     rest of the code to make it easier to replace it with a more
     complicated implementation in the future.  The rest of the inlining
     code acts as a library aimed at modifying the callgraph and verifying
     that the limits on code size growth are respected.

     To mark a given call inline, use the cgraph_mark_inline function; the
     verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm, ordering
     all functions by their "profitability" (estimated by code size growth)
     and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used in
     non-unit-at-a-time mode.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes the local properties of functions that are used by
     the inliner: the estimated function body size, whether the function is
     inlinable at all, and its stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied to the function body.

   pass_early_inlining

     Simple local inlining pass inlining callees into the current function.
     This pass makes no global whole-compilation-unit analysis, and thus,
     when allowed to do inlining that expands code size, it might result in
     unbounded growth of the whole unit.

     This is the main inlining pass in non-unit-at-a-time mode.

     With unit-at-a-time the pass is run during conversion into SSA form.
     Only functions already converted into SSA form are inlined, so the
     conversion must happen in topological order on the callgraph (which is
     maintained by the pass manager).  The functions are early optimized
     after inlining, so the early inliner sees the unoptimized function
     itself, but all considered callees are already optimized, allowing it
     to unfold the abstraction penalty on C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, the early inlining is also necessary to reduce
     instrumentation costs on programs with a high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought in
     via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures the
     first inlining.

   pass_ipa_inline

     This is the main pass implementing the simple greedy algorithm for
     inlining of small functions that results in overall growth of the
     compilation unit, and inlining of functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph) and, unlike early inlining, does
     not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline on
     a given function.  Possibly the function body before inlining is saved
     when it is needed for further inlining later.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"

/* Mode the incremental inliner operates on:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In SPEED mode, all small functions are inlined.  This might result in
   unbounded growth of the compilation unit and is used only in
   non-unit-at-a-time mode.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE,
  INLINE_SPEED,
  INLINE_ALL
};

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
                                      int);

/* Statistics we collect about inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_insns;
static gcov_type max_count;

/* Estimate size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size;
  tree fndecl = what->decl, arg;
  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);

  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
    call_insns += estimate_move_cost (TREE_TYPE (arg));
  size = (what->global.insns - call_insns) * times + to->global.insns;
  gcc_assert (size >= 0);
  return size;
}
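
/* Editorial note: a hypothetical worked example of the estimate above,
   using made-up numbers rather than real measurements.  If WHAT has
   global.insns == 30, its two int arguments cost one move each on top of
   a PARAM_INLINE_CALL_COST of 16 (so call_insns == 18), TO has
   global.insns == 100 and TIMES == 2, the estimate is
   (30 - 18) * 2 + 100 == 124 insns for TO after inlining.  */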

/* E is expected to be an edge being inlined.  Clone destination node of
   the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating new
   clones or re-using node originally representing out-of-line function call.
   */
void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate, bool update_original)
{
  HOST_WIDE_INT peak;
  if (duplicate)
    {
      /* We may eliminate the need for out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && !e->callee->needed
          && !cgraph_new_nodes
          && flag_unit_at_a_time)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (DECL_SAVED_TREE (e->callee->decl))
            overall_insns -= e->callee->global.insns, nfunctions_inlined++;
          duplicate = false;
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->count, e->loop_nest,
                                 update_original);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset + e->caller->local.estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset + e->callee->local.estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}

/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  */

void
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
{
  int old_insns = 0, new_insns = 0;
  struct cgraph_node *to = NULL, *what;

  if (e->callee->inline_decl)
    cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));

  gcc_assert (e->inline_failed);
  e->inline_failed = NULL;

  if (!e->callee->global.inlined && flag_unit_at_a_time)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  /* Now update size of caller and all functions caller is inlined into.  */
  for (; e && !e->inline_failed; e = e->caller->callers)
    {
      old_insns = e->caller->global.insns;
      new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
                                                       what);
      gcc_assert (new_insns >= 0);
      to = e->caller;
      to->global.insns = new_insns;
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_insns > old_insns)
    overall_insns += new_insns - old_insns;
  ncalls_inlined++;
}

/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return following unredirected edge in the list of callers
   of EDGE->CALLEE.  */

static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;
  int times = 0;

  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true);
          if (e == edge)
            edge = next;
          times++;
        }
    }
  gcc_assert (times);
  return edge;
}

/* Estimate the growth caused by inlining NODE into all callees.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                 - e->caller->global.insns);

  /* ??? Wrong for self-recursive functions or cases where we decide not to
     inline for different reasons, but it is not a big deal as in that case
     we will keep the body around, but we will also avoid some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl))
    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
}
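
/* Editorial note: a hypothetical example with made-up numbers.  If NODE has
   global.insns == 20 and two callers of 50 and 70 insns whose estimated
   sizes after inlining are 62 and 82, the growth is (62 - 50) + (82 - 70)
   == 24; if the offline copy can then be dropped (NODE not needed and not
   DECL_EXTERNAL), the final estimate is 24 - 20 == 4 insns.  */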

/* Return false when inlining WHAT into TO is not a good idea
   as it would cause too large growth of function bodies.
   When ONE_ONLY is true, assume that only one call site is going
   to be inlined, otherwise figure out how many call sites in
   TO call WHAT and verify that all can be inlined.
   */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            const char **reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small function,
     take the inlined function as base for limiting the growth.  */
  if (to->local.self_insns > what->local.self_insns)
    limit = to->local.self_insns;
  else
    limit = what->local.self_insns;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.insns
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = N_("--param large-function-growth limit reached");
      return false;
    }

  stack_size_limit = to->local.estimated_self_stack_size;

  stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
                   + to->local.estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
        *reason = N_("--param large-stack-frame-growth limit reached");
      return false;
    }
  return true;
}
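
/* Editorial note: a hypothetical example with made-up numbers.  With
   --param large-function-growth == 100 (percent), inlining a 40-insn WHAT
   once into a 300-insn TO gives limit == 300 + 300 == 600; the estimated
   new size of roughly 332 insns is rejected only if it exceeds the old
   size, the limit and --param large-function-insns all at once, so this
   inlining passes the size check.  The stack check works the same way
   against large-stack-frame-growth and large-stack-frame.  */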

/* Return true when function N is small enough to be inlined.  */

bool
cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
{
  tree decl = n->decl;

  if (n->inline_decl)
    decl = n->inline_decl;
  if (!DECL_INLINE (decl))
    {
      if (reason)
        *reason = N_("function not inlinable");
      return false;
    }

  if (!DECL_STRUCT_FUNCTION (decl)->cfg)
    {
      if (reason)
        *reason = N_("function body not available");
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-single limit reached");
          return false;
        }
    }
  else
    {
      if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-auto limit reached");
          return false;
        }
    }

  return true;
}

/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining all cases where the same function appears more
   than once in a single recursion nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             const char **reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? N_("recursive inlining") : "");
  return recursive;
}

/* Return true if the call can be hot.  */
static bool
cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
{
  if (profile_info && flag_branch_probabilities
      && (edge->count
          <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
    return false;
  return true;
}

/* A cost model driving the inlining heuristics in a way so that the edges
   with the smallest badness are inlined first.  After each inlining is
   performed, the costs of all caller edges of the nodes affected are
   recomputed, so the metrics may accurately depend on values such as the
   number of inlinable callers of the function or the function body size.

   With profiling we use the number of executions of each edge to drive the
   cost.  We also should distinguish hot and cold calls, where the cold
   calls are inlined only when code size is overall improved.
   */

static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  if (max_count)
    {
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      growth -= edge->caller->global.insns;

      /* Always prefer inlining saving code size.  */
      if (growth <= 0)
        return INT_MIN - growth;
      return ((int)((double)edge->count * INT_MIN / max_count)) / growth;
    }
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      int badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;

      /* Make recursive inlining happen always after other inlining is done.  */
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
        return badness + 1;
      else
        return badness;
    }
}
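
/* Editorial note: a hypothetical example with made-up numbers.  Without
   profile data, a callee whose overall estimated growth is 12 insns gets a
   base badness of 12 * 256 == 3072; called from loop depth 3, that is
   shifted down to 3072 >> 3 == 384, so the nested call is tried earlier
   than an equivalent call outside any loop (smaller badness is inlined
   first).  With profile data the badness is instead scaled by edge->count
   relative to max_count.  */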

/* Recompute heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  const char *failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        int badness = cgraph_edge_badness (edge);
        if (edge->aux)
          {
            fibnode_t n = edge->aux;
            gcc_assert (n->data == edge);
            if (n->key == badness)
              continue;

            /* fibheap_replace_key only increases the keys.  */
            if (fibheap_replace_key (heap, n, badness))
              continue;
            fibheap_delete_node (heap, edge->aux);
          }
        edge->aux = fibheap_insert (heap, badness, edge);
      }
}

/* Recompute heap nodes for the caller edges of each callee of NODE.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else if (!e->inline_failed)
      update_callee_keys (heap, e->callee, updated_nodes);
}

/* Enqueue all recursive calls from NODE into the priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by expected number
           of calls.  Without profile feedback we maintain a simple queue
           to order candidates via recursive depths.  */
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until the body size reaches the given limit.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that function is small enough to be considered for inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             " Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, 1, false);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr = fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     " maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         " Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, " Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n Inlined %i times, body grown from %i to %i insns\n", n,
             master_clone->global.insns, node->global.insns);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls of the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}

/* Set inline_failed for all callers of given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n", reason);
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}

/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
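
/* Editorial note: a hypothetical example with made-up numbers.  For a unit
   of 50000 insns and --param inline-unit-growth == 30, the unit may grow to
   50000 * 130 / 100 == 65000 insns; units smaller than
   --param large-unit-insns are first rounded up to that value before the
   percentage is applied.  */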

/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap based on estimated
   growth of the overall number of instructions, and then we update the
   estimates.

   INLINED and INLINED_CALLEES are just pointers to arrays large enough
   to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  const char *failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_insns, max_insns;

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
          || node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
          }
    }

  max_insns = compute_max_insns (overall_insns);
  min_insns = overall_insns;

  while (overall_insns <= max_insns && (edge = fibheap_extract_min (heap)))
    {
      int old_insns = overall_insns;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

      growth -= edge->caller->global.insns;

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i insns\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.insns);
          fprintf (dump_file,
                   " to be inlined into %s\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i.\n",
                   cgraph_node_name (edge->caller),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge));
          if (edge->count)
            fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
        }
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* When not having profile info ready, we don't weight in any way the
         position of the call in the procedure itself.  This means that if a
         call of function A from function B seems profitable to inline, the
         recursive call of function A in the inline copy of A in B will look
         profitable too, and we end up inlining until reaching the maximal
         function growth.  This is not a good idea, so prohibit the
         recursive inlining.

         ??? When the frequencies are taken into account we might not need
         this restriction.  */
      if (!max_count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (!cgraph_maybe_hot_edge_p (edge) && growth > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed =
                N_("call is unlikely");
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
            }
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where))
            continue;
          update_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (!cgraph_check_inline_limits (edge->caller, edge->callee,
                                           &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller), edge->inline_failed);
              continue;
            }
          callee = edge->callee;
          cgraph_mark_inline_edge (edge, true);
          update_callee_keys (heap, callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
         such as the number of inlinable calls and the size of the function
         body.  After inlining these properties might change for the function
         we inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has %i insns, "
                   "net change of %+i insns.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.insns,
                   overall_insns - old_insns);
        }
      if (min_insns > overall_insns)
        {
          min_insns = overall_insns;
          max_insns = compute_max_insns (min_insns);

          if (dump_file)
            fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
        }
    }
  while ((edge = fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = N_("--param inline-unit-growth limit reached");
    }
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_insns = 0;
  int i;
  int initial_insns = 0;

  max_count = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed && (node->needed || node->reachable))
      {
        struct cgraph_edge *e;

        initial_insns += node->local.self_insns;
        gcc_assert (node->local.self_insns == node->global.insns);
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
      }
  overall_insns = initial_insns;
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with %i insns.\n",
             initial_insns);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a priority
     so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      struct cgraph_edge *e, *next;

      node = order[i];

      /* Handle nodes to be flattened, but don't update overall unit size.  */
      if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Flattening %s\n", cgraph_node_name (node));
          cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
        }

      if (!node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file,
                 "\nConsidering %s %i insns (always inline)\n",
                 cgraph_node_name (node), node->global.insns);
      old_insns = overall_insns;
      for (e = node->callers; e; e = next)
        {
          next = e->next_caller;
          if (!e->inline_failed)
            continue;
          if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                           &e->inline_failed))
            continue;
          cgraph_mark_inline_edge (e, true);
          if (dump_file)
            fprintf (dump_file,
                     " Inlined into %s which now has %i insns.\n",
                     cgraph_node_name (e->caller),
                     e->caller->global.insns);
        }
      /* Inlining a self-recursive function might introduce new calls to
         itself that we didn't see in the loop above.  Fill in the proper
         reason why the inlining failed.  */
      for (e = node->callers; e; e = e->next_caller)
        if (e->inline_failed)
          e->inline_failed = N_("recursive inlining");
      if (dump_file)
        fprintf (dump_file,
                 " Inlined for a net change of %+i insns.\n",
                 overall_insns - old_insns);
    }

  if (!flag_really_no_inline)
    cgraph_decide_inlining_of_small_functions ();

  if (!flag_really_no_inline
      && flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers && !node->callers->next_caller && !node->needed
              && node->local.inlinable && node->callers->inline_failed
              && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
            {
              if (dump_file)
                {
                  fprintf (dump_file,
                           "\nConsidering %s %i insns.\n",
                           cgraph_node_name (node), node->global.insns);
                  fprintf (dump_file,
                           " Called once from %s %i insns.\n",
                           cgraph_node_name (node->callers->caller),
                           node->callers->caller->global.insns);
                }

              old_insns = overall_insns;

              if (cgraph_check_inline_limits (node->callers->caller, node,
                                              NULL, false))
                {
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
                    fprintf (dump_file,
                             " Inlined into %s which now has %i insns"
                             " for a net change of %+i insns.\n",
                             cgraph_node_name (node->callers->caller),
                             node->callers->caller->global.insns,
                             overall_insns - old_insns);
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             " Inline limit reached, not inlined.\n");
                }
            }
        }
    }

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "%i insns turned to %i insns.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_insns,
             overall_insns);
  free (order);
  return 0;
}

/* Try to inline edge E from the incremental inliner.  MODE specifies the
   mode of the inliner.

   We detect cycles by storing the mode of the inliner into the cgraph_node
   the last time we visited it in the recursion.  In general when the mode
   is set, we have recursive inlining, but as a special case, we want to try
   harder to inline ALWAYS_INLINE functions: consider the callgraph
   a->b->c->b, with a being flattened and b being always inline.  Flattening
   'a' will collapse a->b->c before hitting the cycle.  To accommodate
   always inline, we however need to inline a->b->c->b.

   So after hitting a cycle the first time, we switch into ALWAYS_INLINE
   mode and stop inlining only after hitting ALWAYS_INLINE in ALWAYS_INLINE
   mode.  */
static bool
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
  struct cgraph_node *callee = e->callee;
  enum inlining_mode callee_mode = (size_t) callee->aux;
  bool always_inline = e->callee->local.disregard_inline_limits;

  /* Have we hit a cycle?  */
  if (callee_mode)
    {
      /* It is the first time we see it and we are not in ALWAYS_INLINE-only
         mode yet, and the function in question is always_inline.  */
      if (always_inline && mode != INLINE_ALWAYS_INLINE)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Hit cycle in %s, switching to always inline only.\n",
                       cgraph_node_name (callee));
            }
          mode = INLINE_ALWAYS_INLINE;
        }
      /* Otherwise it is time to give up.  */
      else
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining %s into %s to avoid cycle.\n",
                       cgraph_node_name (callee),
                       cgraph_node_name (e->caller));
            }
          e->inline_failed = (e->callee->local.disregard_inline_limits
                              ? N_("recursive inlining") : "");
          return false;
        }
    }

  callee->aux = (void *)(size_t) mode;
  if (dump_file)
    {
      indent_to (dump_file, depth);
      fprintf (dump_file, " Inlining %s into %s.\n",
               cgraph_node_name (e->callee),
               cgraph_node_name (e->caller));
    }
  if (e->inline_failed)
    cgraph_mark_inline (e);

  /* In order to fully inline always_inline functions at -O0, we need to
     recurse here, since the inlined functions might not be processed by
     incremental inlining at all yet.

     Also flattening needs to be done recursively.  */

  if (!flag_unit_at_a_time || mode == INLINE_ALL || always_inline)
    cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
  callee->aux = (void *)(size_t) callee_mode;
  return true;
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.
   DEPTH is the depth of recursion, used only for debug output.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
                                      enum inlining_mode mode,
                                      int depth)
{
  struct cgraph_edge *e;
  bool inlined = false;
  const char *failed_reason;
  enum inlining_mode old_mode;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  old_mode = (size_t) node->aux;

  if (mode != INLINE_ALWAYS_INLINE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
        }
      mode = INLINE_ALL;
    }

  node->aux = (void *)(size_t) mode;

  /* First of all look for always inline functions.  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->callee->local.disregard_inline_limits
          && (mode != INLINE_ALL || !e->callee->local.inlinable))
        continue;
      /* When the edge is already inlined, we just need to recurse into
         it in order to fully flatten the leaves.  */
      if (!e->inline_failed && mode == INLINE_ALL)
        {
          inlined |= try_inline (e, mode, depth);
          continue;
        }
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file,
                   "Considering to always inline inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        }
      if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file, "Not inlining: recursive call.\n");
            }
          continue;
        }
      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
          != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file, "Not inlining: SSA form does not match.\n");
            }
          continue;
        }
      if (!DECL_SAVED_TREE (e->callee->decl) && !e->callee->inline_decl)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining: Function body no longer available.\n");
            }
          continue;
        }
      inlined |= try_inline (e, mode, depth);
    }

  /* Now do the automatic inlining.  */
  if (!flag_really_no_inline && mode != INLINE_ALL
      && mode != INLINE_ALWAYS_INLINE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.inlinable
            || !e->inline_failed
            || e->callee->local.disregard_inline_limits)
          continue;
        if (dump_file)
          fprintf (dump_file, "Considering inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: SSA form does not match.\n");
              }
            continue;
          }
        /* When the function body would grow and inlining the function won't
           eliminate the need for an offline copy of the function, don't
           inline.  */
        if (mode == INLINE_SIZE
            && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                > e->caller->global.insns)
            && cgraph_estimate_growth (e->callee) > 0)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: code size would grow by %i insns.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
                         - e->caller->global.insns);
              }
            continue;
          }
        if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                         false))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: %s.\n", e->inline_failed);
              }
            continue;
          }
        if (!DECL_SAVED_TREE (e->callee->decl) && !e->callee->inline_decl)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        if (cgraph_default_inline_p (e->callee, &failed_reason))
          inlined |= try_inline (e, mode, depth);
        else if (!flag_unit_at_a_time)
          e->inline_failed = failed_reason;
      }
  node->aux = (void *)(size_t) old_mode;
  return inlined;
}

/* When inlining shall be performed.  */
static bool
cgraph_gate_inlining (void)
{
  return flag_inline_trees;
}

struct tree_opt_pass pass_ipa_inline =
{
  "inline", /* name */
  cgraph_gate_inlining, /* gate */
  cgraph_decide_inlining, /* execute */
  NULL, /* sub */
  NULL, /* next */
  0, /* static_pass_number */
  TV_INLINE_HEURISTICS, /* tv_id */
  0, /* properties_required */
  PROP_cfg, /* properties_provided */
  0, /* properties_destroyed */
  TODO_remove_functions, /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions, /* todo_flags_finish */
  0 /* letter */
};

/* Because inlining might remove no-longer-reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading from
   nodes that have already been collected.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function
   calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;

  if (sorrycount || errorcount)
    return 0;
  if (cgraph_decide_inlining_incrementally (node,
                                            flag_unit_at_a_time
                                            ? INLINE_SIZE : INLINE_SPEED, 0))
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  return todo;
}

/* When inlining shall be performed.  */
static bool
cgraph_gate_early_inlining (void)
{
  return flag_inline_trees && flag_early_inlining;
}

struct tree_opt_pass pass_early_inline =
{
  "einline", /* name */
  cgraph_gate_early_inlining, /* gate */
  cgraph_early_inlining, /* execute */
  NULL, /* sub */
  NULL, /* next */
  0, /* static_pass_number */
  TV_INLINE_HEURISTICS, /* tv_id */
  0, /* properties_required */
  PROP_cfg, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_dump_func, /* todo_flags_finish */
  0 /* letter */
};

/* When inlining shall be performed.  */
static bool
cgraph_gate_ipa_early_inlining (void)
{
  return (flag_inline_trees && flag_early_inlining
          && (flag_branch_probabilities || flag_test_coverage
              || profile_arc_flag));
}

/* IPA pass wrapper for the early inlining pass.  We need to run early
   inlining before tree profiling, so we have a stand-alone IPA pass for
   doing so.  */
struct tree_opt_pass pass_ipa_early_inline =
{
  "einline_ipa", /* name */
  cgraph_gate_ipa_early_inlining, /* gate */
  NULL, /* execute */
  NULL, /* sub */
  NULL, /* next */
  0, /* static_pass_number */
  TV_INLINE_HEURISTICS, /* tv_id */
  0, /* properties_required */
  PROP_cfg, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_dump_cgraph, /* todo_flags_finish */
  0 /* letter */
};

/* Compute parameters of functions used by inliner.  */
static unsigned int
compute_inline_parameters (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);

  gcc_assert (!node->global.inlined_to);
  node->local.estimated_self_stack_size = estimated_stack_frame_size ();
  node->global.estimated_stack_size = node->local.estimated_self_stack_size;
  node->global.stack_frame_offset = 0;
  node->local.inlinable = tree_inlinable_function_p (current_function_decl);
  node->local.self_insns = estimate_num_insns (current_function_decl,
                                               &eni_inlining_weights);
  if (node->local.inlinable)
    node->local.disregard_inline_limits
      = lang_hooks.tree_inlining.disregard_inline_limits (current_function_decl);
  if (flag_really_no_inline && !node->local.disregard_inline_limits)
    node->local.inlinable = 0;
  /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
  node->global.insns = node->local.self_insns;
  return 0;
}

/* When inlining shall be performed.  */
static bool
gate_inline_passes (void)
{
  return flag_inline_trees;
}

struct tree_opt_pass pass_inline_parameters =
{
  NULL, /* name */
  gate_inline_passes, /* gate */
  compute_inline_parameters, /* execute */
  NULL, /* sub */
  NULL, /* next */
  0, /* static_pass_number */
  TV_INLINE_HEURISTICS, /* tv_id */
  0, /* properties_required */
  PROP_cfg, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
  0 /* letter */
};

/* Apply inline plan to the function.  */
static unsigned int
apply_inline (void)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;
  struct cgraph_node *node = cgraph_node (current_function_decl);

  /* Even when not optimizing, ensure that always_inline functions get
     inlined.  */
  if (!optimize)
    cgraph_decide_inlining_incrementally (node, INLINE_SPEED, 0);

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (current_function_decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    if (!e->inline_failed || warn_inline)
      break;
  if (e)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  /* In non-unit-at-a-time we must mark all referenced functions as needed.  */
  if (!flag_unit_at_a_time)
    {
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
        if (e->callee->analyzed)
          cgraph_mark_needed_node (e->callee);
    }
  return todo | execute_fixup_cfg ();
}

struct tree_opt_pass pass_apply_inline =
{
  "apply_inline", /* name */
  NULL, /* gate */
  apply_inline, /* execute */
  NULL, /* sub */
  NULL, /* next */
  0, /* static_pass_number */
  TV_INLINE_HEURISTICS, /* tv_id */
  0, /* properties_required */
  PROP_cfg, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_dump_func | TODO_verify_flow
  | TODO_verify_stmts, /* todo_flags_finish */
  0 /* letter */
};

#include "gt-ipa-inline.h"