/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation for the particular representation of inline plans in the
   callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks a given call inline and performs the necessary
     modifications of the cgraph (production of the clones and updating of
     overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass that aims to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of particular heuristics is separated from the rest
     of the code to make it easier to replace it with a more complicated
     implementation in the future.  The rest of the inlining code acts as a
     library aimed at modifying the callgraph and verifying that the
     parameters on code size growth fit.

     To mark a given call inline, use the cgraph_mark_inline function; the
     verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristic implements a simple knapsack-style algorithm ordering all
     functions by their "profitability" (estimated by code size growth) and
     inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used in non-unit-at-a-time
     mode.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes the local properties of functions that are used by
     the inliner: estimated function body size, whether the function is
     inlinable at all, and stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied to the function body.

   pass_early_inlining

     A simple local inlining pass that inlines callees into the current
     function.  This pass makes no whole-compilation-unit analysis, and
     thus, when allowed to do inlining that expands code size, it might
     result in unbounded growth of the whole unit.

     This is the main inlining pass in non-unit-at-a-time mode.

     With unit-at-a-time the pass is run during conversion into SSA form.
     Only functions already converted into SSA form are inlined, so the
     conversion must happen in topological order on the callgraph (which is
     maintained by the pass manager).  The functions after inlining are
     early optimized, so the early inliner sees the unoptimized function
     itself, but all considered callees are already optimized, allowing it
     to unfold the abstraction penalty of C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, early inlining is also necessary to reduce
     instrumentation costs on programs with a high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought in
     via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures the
     first inlining.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm for
     inlining of small functions (even when that results in overall growth
     of the compilation unit) and inlining of functions called once.  The
     pass computes just the so-called inline plan (the representation of
     the inlining to be done in the callgraph); unlike early inlining, it
     does not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline on
     the given function.  Possibly the function body before inlining is
     saved when it is needed for further inlining later.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"

/* Modes the incremental inliner operates on:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In SPEED mode, all small functions are inlined.  This might result in
   unbounded growth of the compilation unit and is used only in
   non-unit-at-a-time mode.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE,
  INLINE_SPEED,
  INLINE_ALL
};
static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
                                      int);

/* Statistics we collect about inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_insns;
static gcov_type max_count;

/* Estimate size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size;
  tree fndecl = what->decl, arg;
  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);

  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
    call_insns += estimate_move_cost (TREE_TYPE (arg));
  size = (what->global.insns - call_insns) * times + to->global.insns;
  gcc_assert (size >= 0);
  return size;
}
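
/* Worked example (illustrative numbers only, not taken from any particular
   target): if PARAM_INLINE_CALL_COST is 16 and the callee takes two
   arguments whose moves cost 1 insn each, call_insns is 18.  Inlining a
   40-insn callee once into a 100-insn caller is then estimated as
   (40 - 18) * 1 + 100 = 122 insns; the subtracted call overhead is what
   makes very small callees essentially free to inline.  */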

/* E is expected to be an edge being inlined.  Clone destination node of
   the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating new
   clones or re-using node originally representing out-of-line function call.
   */
void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate, bool update_original)
{
  HOST_WIDE_INT peak;
  if (duplicate)
    {
      /* We may eliminate the need for out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && !e->callee->needed
          && !cgraph_new_nodes
          && flag_unit_at_a_time)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (DECL_SAVED_TREE (e->callee->decl))
            overall_insns -= e->callee->global.insns, nfunctions_inlined++;
          duplicate = false;
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
                                 update_original);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset + e->caller->local.estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset + e->callee->local.estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}

/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  */

void
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
{
  int old_insns = 0, new_insns = 0;
  struct cgraph_node *to = NULL, *what;

  if (e->callee->inline_decl)
    cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));

  gcc_assert (e->inline_failed);
  e->inline_failed = NULL;

  if (!e->callee->global.inlined && flag_unit_at_a_time)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  /* Now update size of caller and all functions caller is inlined into.  */
  for (; e && !e->inline_failed; e = e->caller->callers)
    {
      old_insns = e->caller->global.insns;
      new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
                                                       what);
      gcc_assert (new_insns >= 0);
      to = e->caller;
      to->global.insns = new_insns;
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_insns > old_insns)
    overall_insns += new_insns - old_insns;
  ncalls_inlined++;
}

/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */

static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!CALL_CANNOT_INLINE_P (edge->call_stmt));
  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true);
          if (e == edge)
            edge = next;
        }
    }

  return edge;
}

/* Estimate the growth caused by inlining NODE into all callers.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                 - e->caller->global.insns);

  /* ??? Wrong for self recursive functions or cases where we decide to not
     inline for different reasons, but it is not a big deal as in that case
     we will keep the body around, but we will also avoid some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl))
    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
}
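
/* Illustrative example (hypothetical numbers): a static 30-insn function
   with two remaining call sites, each estimated to add 12 insns to its
   caller, accumulates growth 12 + 12 = 24; because inlining every call site
   lets the offline body be dropped, the 30 insns of the body are subtracted,
   giving an overall estimated growth of -6 insns, i.e. a net size win.  */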

/* Return false when inlining WHAT into TO is not a good idea
   as it would cause too large a growth of function bodies.
   When ONE_ONLY is true, assume that only one call site is going
   to be inlined; otherwise figure out how many call sites in
   TO call WHAT and verify that all can be inlined.
   */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            const char **reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining large function body called once into small function,
     take the inlined function as base for limiting the growth.  */
  if (to->local.self_insns > what->local.self_insns)
    limit = to->local.self_insns;
  else
    limit = what->local.self_insns;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.insns
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = N_("--param large-function-growth limit reached");
      return false;
    }

  stack_size_limit = to->local.estimated_self_stack_size;

  stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
                   + to->local.estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
        *reason = N_("--param large-stack-frame-growth limit reached");
      return false;
    }
  return true;
}
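
/* Illustrative example (hypothetical parameter values): inlining a 200-insn
   callee into a 50-insn caller uses the larger body (200 insns) as the base,
   so with --param large-function-growth=100 the limit is 200 + 200 = 400
   insns.  The growth limit only rejects the inlining once the combined body
   also exceeds --param large-function-insns, so genuinely small results are
   never refused here.  */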

/* Return true when function N is small enough to be inlined.  */

bool
cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
{
  tree decl = n->decl;

  if (n->inline_decl)
    decl = n->inline_decl;
  if (!DECL_INLINE (decl))
    {
      if (reason)
        *reason = N_("function not inlinable");
      return false;
    }

  if (!DECL_STRUCT_FUNCTION (decl)->cfg)
    {
      if (reason)
        *reason = N_("function body not available");
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-single limit reached");
          return false;
        }
    }
  else
    {
      if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-auto limit reached");
          return false;
        }
    }

  return true;
}

/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining all cases where the same function appears
   more than once in a single recursion nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             const char **reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? N_("recursive inlining") : "");
  return recursive;
}

/* Return true if the call can be hot.  */

static bool
cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
{
  if (profile_info && flag_branch_probabilities
      && (edge->count
          <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
    return false;
  if (lookup_attribute ("cold", DECL_ATTRIBUTES (edge->callee->decl))
      || lookup_attribute ("cold", DECL_ATTRIBUTES (edge->caller->decl)))
    return false;
  if (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl)))
    return true;
  if (flag_guess_branch_prob
      && edge->frequency < (CGRAPH_FREQ_MAX
                            / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
    return false;
  return true;
}
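
/* For example (parameter values are illustrative, not defaults asserted
   here): with profile feedback and --param hot-bb-count-fraction=10000, an
   edge executed no more than sum_max / 10000 times is considered cold;
   without feedback, an edge whose guessed frequency is below
   CGRAPH_FREQ_MAX / hot-bb-frequency-fraction is likewise treated as cold,
   unless the "hot"/"cold" attributes decide first.  */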

/* A cost model driving the inlining heuristics in such a way that the edges
   with the smallest badness are inlined first.  After each inlining is
   performed, the costs of all caller edges of the affected nodes are
   recomputed, so the metrics may accurately depend on values such as the
   number of inlinable callers of the function or the function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  int badness;
  int growth =
    cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

  growth -= edge->caller->global.insns;

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    badness = INT_MIN - growth;

  /* When profiling is available, base priorities on -(#calls / growth).
     So we optimize for the overall number of "executed" inlined calls.  */
  else if (max_count)
    badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;

  /* When a function local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of inlined
     calls.  This is not too accurate since while the call might be frequent
     within the function, the function itself may be infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so functions called
     fewer times, of the same size, get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      growth -= edge->caller->global.insns;
      badness = growth * 256;

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 256)
        div = 256 + ceil_log2 (div) - 8;
      if (div < 1)
        div = 1;
      if (badness > 0)
        badness /= div;
      badness += cgraph_estimate_growth (edge->callee);
    }
  /* When a function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall number
     of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        {
          badness <<= nest;
        }
    }
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}
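
/* Worked example for the guessed-profile case (illustrative numbers,
   integer arithmetic): a call whose inlining grows the caller by 10 insns,
   taken with frequency equal to CGRAPH_FREQ_BASE (div = 100), into a callee
   with estimated overall growth 40, gets badness 10 * 256 / 100 + 40 = 65.
   The same call at a tenth of that frequency (div = 10) gets
   10 * 256 / 10 + 40 = 296 and is therefore popped from the heap later,
   since smaller badness is inlined first.  */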

/* Recompute heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  const char *failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        int badness = cgraph_edge_badness (edge);
        if (edge->aux)
          {
            fibnode_t n = (fibnode_t) edge->aux;
            gcc_assert (n->data == edge);
            if (n->key == badness)
              continue;

            /* fibheap_replace_key only increases the keys.  */
            if (fibheap_replace_key (heap, n, badness))
              continue;
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
          }
        edge->aux = fibheap_insert (heap, badness, edge);
      }
}

/* Recompute heap nodes for the caller edges of each of NODE's callees.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else if (!e->inline_failed)
      update_callee_keys (heap, e->callee, updated_nodes);
}

/* Enqueue all recursive calls from NODE into the priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by expected number
           of calls.  Without profile feedback we maintain a simple queue
           to order candidates via recursive depths.  */
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given limit.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  if (optimize_size)
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that function is small enough to be considered for inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             " Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     " maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         " Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, " Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n Inlined %i times, body grown from %i to %i insns\n", n,
             master_clone->global.insns, node->global.insns);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces number of calls of the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}
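
/* Illustration (hypothetical numbers): for a self-recursive function whose
   one-level inlined size is 80 insns against a limit of 450, the loop above
   keeps extracting the most promising recursive call sites, redirecting them
   to the master clone and re-queuing the new recursive calls this exposes,
   skipping call sites that are too deep, too cold, or too improbable, until
   the estimated size would pass 450 or the queue empties.  */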

/* Set inline_failed for all callers of given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n", reason);
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}

/* Given a whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
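
/* Worked example (illustrative numbers): a unit estimated at 50000 insns
   with --param inline-unit-growth=30 may grow to 50000 * 130 / 100 = 65000
   insns.  Units smaller than --param large-unit-insns are first rounded up
   to that value, so tiny compilation units still get a useful budget.  */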

/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and the
   estimates are updated as inlining proceeds.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  const char *failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_insns, max_insns;

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
          || node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
          }
    }

  max_insns = compute_max_insns (overall_insns);
  min_insns = overall_insns;

  while (overall_insns <= max_insns
         && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
    {
      int old_insns = overall_insns;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

      growth -= edge->caller->global.insns;

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i insns\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.insns);
          fprintf (dump_file,
                   " to be inlined into %s\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge),
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file, " Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
        }
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* When we don't have profile info ready, we don't weight in any way
         the position of the call in the procedure itself.  This means that
         if a call of function A from function B seems profitable to inline,
         the recursive call of A in the inline copy of A in B will look
         profitable too, and we end up inlining until reaching the maximal
         function growth.  This is not a good idea, so prohibit the
         recursive inlining.

         ??? When the frequencies are taken into account we might not need this
         restriction.  */
      if (!max_count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if ((!cgraph_maybe_hot_edge_p (edge) || optimize_size) && growth > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed =
                N_("call is unlikely");
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
            }
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where))
            continue;
          update_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (CALL_CANNOT_INLINE_P (edge->call_stmt)
              || !cgraph_check_inline_limits (edge->caller, edge->callee,
                                              &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller), edge->inline_failed);
              continue;
            }
          callee = edge->callee;
          cgraph_mark_inline_edge (edge, true);
          update_callee_keys (heap, callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
         such as the number of inlinable calls and the size of the function
         body.  After inlining, these properties might change for the
         function we inlined into (since its body size changed) and for the
         functions called by the function we inlined (since the number of
         their inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has %i insns,"
                   " net change of %+i insns.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.insns,
                   overall_insns - old_insns);
        }
      if (min_insns > overall_insns)
        {
          min_insns = overall_insns;
          max_insns = compute_max_insns (min_insns);

          if (dump_file)
            fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
        }
    }
  while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = N_("--param inline-unit-growth limit reached");
    }
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_insns = 0;
  int i;
  int initial_insns = 0;

  max_count = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed && (node->needed || node->reachable))
      {
        struct cgraph_edge *e;

        initial_insns += node->local.self_insns;
        gcc_assert (node->local.self_insns == node->global.insns);
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
      }
  overall_insns = initial_insns;
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with %i insns.\n",
             initial_insns);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a priority
     so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      struct cgraph_edge *e, *next;

      node = order[i];

      /* Handle nodes to be flattened, but don't update overall unit size.  */
      if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Flattening %s\n", cgraph_node_name (node));
          cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
        }

      if (!node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file,
                 "\nConsidering %s %i insns (always inline)\n",
                 cgraph_node_name (node), node->global.insns);
      old_insns = overall_insns;
      for (e = node->callers; e; e = next)
        {
          next = e->next_caller;
          if (!e->inline_failed || CALL_CANNOT_INLINE_P (e->call_stmt))
            continue;
          if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                           &e->inline_failed))
            continue;
          cgraph_mark_inline_edge (e, true);
          if (dump_file)
            fprintf (dump_file,
                     " Inlined into %s which now has %i insns.\n",
                     cgraph_node_name (e->caller),
                     e->caller->global.insns);
        }
      /* Inlining self-recursive functions might introduce new calls to
         them that we didn't see in the loop above.  Fill in the proper
         reason why the inlining failed.  */
      for (e = node->callers; e; e = e->next_caller)
        if (e->inline_failed)
          e->inline_failed = N_("recursive inlining");
      if (dump_file)
        fprintf (dump_file,
                 " Inlined for a net change of %+i insns.\n",
                 overall_insns - old_insns);
    }

  if (!flag_really_no_inline)
    cgraph_decide_inlining_of_small_functions ();

  if (!flag_really_no_inline
      && flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */

      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers && !node->callers->next_caller && !node->needed
              && node->local.inlinable && node->callers->inline_failed
              && !CALL_CANNOT_INLINE_P (node->callers->call_stmt)
              && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
            {
              if (dump_file)
                {
                  fprintf (dump_file,
                           "\nConsidering %s %i insns.\n",
                           cgraph_node_name (node), node->global.insns);
                  fprintf (dump_file,
                           " Called once from %s %i insns.\n",
                           cgraph_node_name (node->callers->caller),
                           node->callers->caller->global.insns);
                }

              old_insns = overall_insns;

              if (cgraph_check_inline_limits (node->callers->caller, node,
                                              NULL, false))
                {
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
                    fprintf (dump_file,
                             " Inlined into %s which now has %i insns"
                             " for a net change of %+i insns.\n",
                             cgraph_node_name (node->callers->caller),
                             node->callers->caller->global.insns,
                             overall_insns - old_insns);
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             " Inline limit reached, not inlined.\n");
                }
            }
        }
    }

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "%i insns turned to %i insns.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_insns,
             overall_insns);
  free (order);
  return 0;
}

/* Try to inline edge E from the incremental inliner.  MODE specifies the
   mode of the inliner.

   We detect cycles by storing the mode of the inliner into the cgraph_node
   the last time we visited it in the recursion.  In general, when mode is
   set, we have recursive inlining, but as a special case, we want to try
   harder to inline ALWAYS_INLINE functions: consider the callgraph
   a->b->c->b, with a marked flatten and b being always_inline.  Flattening
   'a' will collapse a->b->c before hitting the cycle.  To accommodate
   always_inline, we however need to inline a->b->c->b.

   So after hitting the cycle for the first time, we switch into
   ALWAYS_INLINE mode and stop inlining only after hitting ALWAYS_INLINE in
   ALWAYS_INLINE mode.  */
static bool
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
  struct cgraph_node *callee = e->callee;
  enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
  bool always_inline = e->callee->local.disregard_inline_limits;

  /* Have we hit a cycle?  */
  if (callee_mode)
    {
      /* This is the first time we hit the cycle, we are not in
         ALWAYS_INLINE-only mode yet, and the function in question is
         always_inline.  */
      if (always_inline && mode != INLINE_ALWAYS_INLINE)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Hit cycle in %s, switching to always inline only.\n",
                       cgraph_node_name (callee));
            }
          mode = INLINE_ALWAYS_INLINE;
        }
      /* Otherwise it is time to give up.  */
      else
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining %s into %s to avoid cycle.\n",
                       cgraph_node_name (callee),
                       cgraph_node_name (e->caller));
            }
          e->inline_failed = (e->callee->local.disregard_inline_limits
                              ? N_("recursive inlining") : "");
          return false;
        }
    }

  callee->aux = (void *)(size_t) mode;
  if (dump_file)
    {
      indent_to (dump_file, depth);
      fprintf (dump_file, " Inlining %s into %s.\n",
               cgraph_node_name (e->callee),
               cgraph_node_name (e->caller));
    }
  if (e->inline_failed)
    cgraph_mark_inline (e);

  /* In order to fully inline always_inline functions at -O0, we need to
     recurse here, since the inlined functions might not be processed by
     incremental inlining at all yet.

     Also flattening needs to be done recursively.  */

  if (!flag_unit_at_a_time || mode == INLINE_ALL || always_inline)
    cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
  callee->aux = (void *)(size_t) callee_mode;
  return true;
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.
   DEPTH is the depth of recursion, used only for debug output.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
                                      enum inlining_mode mode,
                                      int depth)
{
  struct cgraph_edge *e;
  bool inlined = false;
  const char *failed_reason;
  enum inlining_mode old_mode;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  old_mode = (enum inlining_mode) (size_t)node->aux;

  if (mode != INLINE_ALWAYS_INLINE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
        }
      mode = INLINE_ALL;
    }

  node->aux = (void *)(size_t) mode;

  /* First of all look for always inline functions.  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->callee->local.disregard_inline_limits
          && (mode != INLINE_ALL || !e->callee->local.inlinable))
        continue;
      if (CALL_CANNOT_INLINE_P (e->call_stmt))
        continue;
      /* When the edge is already inlined, we just need to recurse into
         it in order to fully flatten the leaves.  */
      if (!e->inline_failed && mode == INLINE_ALL)
        {
          inlined |= try_inline (e, mode, depth);
          continue;
        }
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file,
                   "Considering to always inline inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        }
      if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file, "Not inlining: recursive call.\n");
            }
          continue;
        }
      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
          != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file, "Not inlining: SSA form does not match.\n");
            }
          continue;
        }
      if (!DECL_SAVED_TREE (e->callee->decl) && !e->callee->inline_decl)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining: Function body no longer available.\n");
            }
          continue;
        }
      inlined |= try_inline (e, mode, depth);
    }

  /* Now do the automatic inlining.  */
  if (!flag_really_no_inline && mode != INLINE_ALL
      && mode != INLINE_ALWAYS_INLINE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.inlinable
            || !e->inline_failed
            || e->callee->local.disregard_inline_limits)
          continue;
        if (dump_file)
          fprintf (dump_file, "Considering inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: SSA form does not match.\n");
              }
            continue;
          }
        /* When the function body would grow and inlining the function won't
           eliminate the need for an offline copy of the function, don't
           inline.  */
        if (mode == INLINE_SIZE
            && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                > e->caller->global.insns)
            && cgraph_estimate_growth (e->callee) > 0)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: code size would grow by %i insns.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
                         - e->caller->global.insns);
              }
            continue;
          }
        if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                         false)
            || CALL_CANNOT_INLINE_P (e->call_stmt))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: %s.\n", e->inline_failed);
              }
            continue;
          }
        if (!DECL_SAVED_TREE (e->callee->decl) && !e->callee->inline_decl)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        if (cgraph_default_inline_p (e->callee, &failed_reason))
          inlined |= try_inline (e, mode, depth);
        else if (!flag_unit_at_a_time)
          e->inline_failed = failed_reason;
      }
  node->aux = (void *)(size_t) old_mode;
  return inlined;
}

/* When inlining shall be performed.  */
static bool
cgraph_gate_inlining (void)
{
  return flag_inline_trees;
}

struct tree_opt_pass pass_ipa_inline =
{
  "inline",                             /* name */
  cgraph_gate_inlining,                 /* gate */
  cgraph_decide_inlining,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_remove_functions,                /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions,              /* todo_flags_finish */
  0                                     /* letter */
};

/* Because inlining might remove no-longer reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;

  if (sorrycount || errorcount)
    return 0;
  if (cgraph_decide_inlining_incrementally (node,
                                            flag_unit_at_a_time || optimize_size
                                            ? INLINE_SIZE : INLINE_SPEED, 0))
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  return todo;
}

/* When inlining shall be performed.  */
static bool
cgraph_gate_early_inlining (void)
{
  return flag_inline_trees && flag_early_inlining;
}

struct tree_opt_pass pass_early_inline =
{
  "einline",                            /* name */
  cgraph_gate_early_inlining,           /* gate */
  cgraph_early_inlining,                /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};

/* When inlining shall be performed.  */
static bool
cgraph_gate_ipa_early_inlining (void)
{
  return (flag_inline_trees && flag_early_inlining
          && (flag_branch_probabilities || flag_test_coverage
              || profile_arc_flag));
}

/* IPA pass wrapper for the early inlining pass.  We need to run early
   inlining before tree profiling, so we have a stand-alone IPA pass for
   doing so.  */
struct tree_opt_pass pass_ipa_early_inline =
{
  "einline_ipa",                        /* name */
  cgraph_gate_ipa_early_inlining,       /* gate */
  NULL,                                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_cgraph,                     /* todo_flags_finish */
  0                                     /* letter */
};

/* Compute parameters of functions used by inliner.  */
static unsigned int
compute_inline_parameters (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);

  gcc_assert (!node->global.inlined_to);
  node->local.estimated_self_stack_size = estimated_stack_frame_size ();
  node->global.estimated_stack_size = node->local.estimated_self_stack_size;
  node->global.stack_frame_offset = 0;
  node->local.inlinable = tree_inlinable_function_p (current_function_decl);
  node->local.self_insns = estimate_num_insns (current_function_decl,
                                               &eni_inlining_weights);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
  if (flag_really_no_inline && !node->local.disregard_inline_limits)
    node->local.inlinable = 0;
  /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
  node->global.insns = node->local.self_insns;
  return 0;
}

/* When inlining shall be performed.  */
static bool
gate_inline_passes (void)
{
  return flag_inline_trees;
}

struct tree_opt_pass pass_inline_parameters =
{
  NULL,                                 /* name */
  gate_inline_passes,                   /* gate */
  compute_inline_parameters,            /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0,                                    /* todo_flags_finish */
  0                                     /* letter */
};

/* Apply inline plan to the function.  */
static unsigned int
apply_inline (void)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;
  struct cgraph_node *node = cgraph_node (current_function_decl);

  /* Even when not optimizing, ensure that always_inline functions get
     inlined.  */
  if (!optimize)
    cgraph_decide_inlining_incrementally (node, INLINE_SPEED, 0);

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (current_function_decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    if (!e->inline_failed || warn_inline)
      break;
  if (e)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  /* In non-unit-at-a-time we must mark all referenced functions as needed.  */
  if (!flag_unit_at_a_time)
    {
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
        if (e->callee->analyzed)
          cgraph_mark_needed_node (e->callee);
    }
  return todo | execute_fixup_cfg ();
}

struct tree_opt_pass pass_apply_inline =
{
  "apply_inline",                       /* name */
  NULL,                                 /* gate */
  apply_inline,                         /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func | TODO_verify_flow
  | TODO_verify_stmts,                  /* todo_flags_finish */
  0                                     /* letter */
};

#include "gt-ipa-inline.h"