gcc/ipa-inline.c
/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store the
   decisions inside the callgraph as a so-called inline plan.  Refer to the
   cgraph.c documentation for the particular representation of inline
   plans in the callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks a given call inline and performs the necessary
     modifications of the cgraph (producing clones and updating overall
     statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass aiming to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of the particular heuristics is separated from the
     rest of the code to make it easier to replace it with a more
     complicated implementation in the future.  The rest of the inlining
     code acts as a library aimed to modify the callgraph and verify that
     the limits on code size growth are honored.

     To mark a given call inline, use the cgraph_mark_inline function; the
     verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm, ordering
     all functions by their "profitability" (estimated by code size
     growth) and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used in
     non-unit-at-a-time mode.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes the local properties of functions that are used by
     the inliner: the estimated function body size, whether the function
     is inlinable at all, and its stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied to the function body.

   pass_early_inlining

     This is a simple local inlining pass that inlines callees into the
     current function.  It does no whole-compilation-unit analysis, and
     thus, when allowed to do inlining that expands code size, it might
     result in unbounded growth of the whole unit.

     This is the main inlining pass in non-unit-at-a-time mode.

     With unit-at-a-time the pass is run during conversion into SSA form.
     Only functions already converted into SSA form are inlined, so the
     conversion must happen in topological order on the callgraph (that is
     maintained by the pass manager).  The functions after inlining are
     early optimized, so the early inliner sees the unoptimized function
     itself, but all considered callees are already optimized, allowing it
     to unfold abstraction penalty on C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, the early inlining is also necessary to reduce
     instrumentation costs on programs with a high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought
     in via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures first
     inlining.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm to do
     inlining of small functions that results in overall growth of the
     compilation unit, and inlining of functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph) and, unlike early inlining,
     does not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline
     on a given function.  Possibly the function body before inlining is
     saved when it is needed for further inlining later.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
/* Modes the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In SPEED mode, all small functions are inlined.  This might result in
   unbounded growth of the compilation unit and is used only in
   non-unit-at-a-time mode.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE,
  INLINE_SPEED,
  INLINE_ALL
};
static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *,
                                      enum inlining_mode, int);
/* Statistics we collect about the inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_insns;
static gcov_type max_count;
/* Estimate size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size;
  tree fndecl = what->decl, arg;
  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);

  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
    call_insns += estimate_move_cost (TREE_TYPE (arg));
  size = (what->global.insns - call_insns) * times + to->global.insns;
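  /* That is, each inlined copy adds the callee body minus the call
     overhead it removes.  For example, a 100-insn callee whose call costs
     15 insns (call cost plus argument moves) inlined once into a 200-insn
     caller yields (100 - 15) * 1 + 200 = 285 insns.  */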
  gcc_assert (size >= 0);
  return size;
}
/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or reusing the node originally representing the out-of-line
   function call.  */
void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
{
  HOST_WIDE_INT peak;
  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && !e->callee->needed
          && !cgraph_new_nodes
          && flag_unit_at_a_time)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (DECL_SAVED_TREE (e->callee->decl))
            overall_insns -= e->callee->global.insns, nfunctions_inlined++;
          duplicate = false;
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->count, e->frequency,
                                 e->loop_nest, update_original);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + e->caller->local.estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
         + e->callee->local.estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;
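  /* The callee's frame is assumed to live at the caller's frame offset
     plus the caller's own frame size; the root of the inline chain keeps
     the maximum ("peak") stack estimate of anything inlined into it,
     which cgraph_check_inline_limits later compares against
     --param large-stack-frame-growth.  */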
  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}
/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  */

void
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
{
  int old_insns = 0, new_insns = 0;
  struct cgraph_node *to = NULL, *what;

  if (e->callee->inline_decl)
    cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));

  gcc_assert (e->inline_failed);
  e->inline_failed = NULL;

  if (!e->callee->global.inlined && flag_unit_at_a_time)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  /* Now update the size of the caller and of all functions the caller is
     inlined into.  */
  for (; e && !e->inline_failed; e = e->caller->callers)
    {
      old_insns = e->caller->global.insns;
      new_insns = cgraph_estimate_size_after_inlining (1, e->caller, what);
      gcc_assert (new_insns >= 0);
      to = e->caller;
      to->global.insns = new_insns;
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_insns > old_insns)
    overall_insns += new_insns - old_insns;
  ncalls_inlined++;
}
/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */

static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!CALL_CANNOT_INLINE_P (edge->call_stmt));
  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true);
          if (e == edge)
            edge = next;
        }
    }

  return edge;
}
/* Estimate the growth caused by inlining NODE into all callers.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                 - e->caller->global.insns);

  /* ??? Wrong for self-recursive functions or cases where we decide not
     to inline for different reasons, but it is not a big deal, as in that
     case we will keep the body around but also avoid some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl))
    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
}
/* Return false when inlining WHAT into TO is not a good idea, as it would
   cause too large growth of the function bodies.
   When ONE_ONLY is true, assume that only one call site is going to be
   inlined; otherwise figure out how many call sites in TO call WHAT and
   verify that all can be inlined.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            const char **reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small
     function, take the inlined function as the base for limiting the
     growth.  */
  if (to->local.self_insns > what->local.self_insns)
    limit = to->local.self_insns;
  else
    limit = what->local.self_insns;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
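  /* For instance, with --param large-function-growth=100 the limit is
     doubled: a 400-insn base body may grow to at most 800 insns before
     the check below starts refusing further inlining.  */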
  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced
     inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.insns
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = N_("--param large-function-growth limit reached");
      return false;
    }

  stack_size_limit = to->local.estimated_self_stack_size;

  stack_size_limit += stack_size_limit
                      * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
                   + to->local.estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
        *reason = N_("--param large-stack-frame-growth limit reached");
      return false;
    }
  return true;
}
/* Return true when function N is small enough to be inlined.  */

bool
cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
{
  tree decl = n->decl;

  if (n->inline_decl)
    decl = n->inline_decl;
  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
        *reason = N_("function not inline candidate");
      return false;
    }

  if (!DECL_STRUCT_FUNCTION (decl)->cfg)
    {
      if (reason)
        *reason = N_("function body not available");
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-single limit reached");
          return false;
        }
    }
  else
    {
      if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = N_("--param max-inline-insns-auto limit reached");
          return false;
        }
    }

  return true;
}
/* Return true when inlining WHAT would create recursive inlining.
   We call it recursive inlining whenever the same function appears more
   than once in a single recursion nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             const char **reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? N_("recursive inlining") : "");
  return recursive;
}
/* Return true if the call can be hot.  */
static bool
cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
{
  if (profile_info && flag_branch_probabilities
      && (edge->count
          <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
    return false;
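  /* Attributes override the estimates: "cold" on either end of the call
     makes the edge cold, while "hot" on the caller forces it hot.
     Otherwise, with guessed profiles, calls whose estimated frequency
     falls below CGRAPH_FREQ_MAX / hot-bb-frequency-fraction are treated
     as cold.  */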
  if (lookup_attribute ("cold", DECL_ATTRIBUTES (edge->callee->decl))
      || lookup_attribute ("cold", DECL_ATTRIBUTES (edge->caller->decl)))
    return false;
  if (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl)))
    return true;
  if (flag_guess_branch_prob
      && edge->frequency < (CGRAPH_FREQ_MAX
                            / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
    return false;
  return true;
}
/* A cost model driving the inlining heuristics so that the edges with the
   smallest badness are inlined first.  After each inlining is performed,
   the costs of all caller edges of the nodes affected are recomputed, so
   the metrics may accurately depend on values such as the number of
   inlinable callers of the function or the function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  int badness;
  int growth =
    cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

  growth -= edge->caller->global.insns;

  /* Always prefer inlining that saves code size.  */
  if (growth <= 0)
    badness = INT_MIN - growth;

  /* When profiling is available, base priorities on -(#calls / growth),
     so we optimize for the overall number of "executed" inlined calls.  */
  else if (max_count)
    badness = ((int) ((double) edge->count * INT_MIN / max_count)) / growth;
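  /* The division by max_count maps the edge count into [INT_MIN, 0]: the
     hottest edge gets the most negative (best) badness, and dividing by
     the growth then favors hot edges with a small size cost.  */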
  /* When a function-local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of
     inlined calls.  This is not too accurate since, while the call might
     be frequent within the function, the function itself may be
     infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so that functions of
     the same size called in fewer places get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      growth -= edge->caller->global.insns;
      badness = growth * 256;

      /* Divide by the estimated call frequency so that more frequent
         calls get better (smaller) badness.  Compress the range so we
         don't overflow.  */
      if (div > 256)
        div = 256 + ceil_log2 (div) - 8;
      if (div < 1)
        div = 1;
      if (badness > 0)
        badness /= div;
      badness += cgraph_estimate_growth (edge->callee);
    }
  /* When a function-local profile is not available or it does not give
     useful information (i.e. the frequency is zero), base the cost on the
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined into the program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if the call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;
    }

  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}
/* Recompute the heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  const char *failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        int badness = cgraph_edge_badness (edge);
        if (edge->aux)
          {
            fibnode_t n = (fibnode_t) edge->aux;
            gcc_assert (n->data == edge);
            if (n->key == badness)
              continue;

            /* fibheap_replace_key only increases the keys.  */
            if (fibheap_replace_key (heap, n, badness))
              continue;
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
          }
        edge->aux = fibheap_insert (heap, badness, edge);
      }
}
/* Recompute the heap nodes for the caller edges of each callee of
   NODE.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else
      update_callee_keys (heap, e->callee, updated_nodes);
}
/* Enqueue all recursive calls from NODE into the priority queue depending
   on how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by the expected
           number of calls.  Without profile feedback we maintain a simple
           queue to order candidates by recursion depth.  */
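        /* The scaling below divides the counts into at most 1<<24 buckets
           (rounding the bucket size up), so even very large gcov counts
           fit in the heap key without overflow; the negation makes hotter
           calls extract first.  */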
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1)
                                        / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until the body size reaches the given limit.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  if (optimize_size
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             " Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need the original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE,
                                    1, false);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     " maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         " Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlining call of depth %i", depth);
          if (node->count)
            fprintf (dump_file, " called approx. %.2f times per call",
                     (double) curr->count / node->count);
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, " Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n Inlined %i times, body grown from %i to %i insns\n", n,
             master_clone->global.insns, node->global.insns);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone itself, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls of the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}
/* Set inline_failed for all callers of a given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n", reason);
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}
/* Given a whole-compilation-unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
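  /* For example, with --param inline-unit-growth=30 a unit of 10000 insns
     may grow to at most 13000 insns; the clamp above keeps small units
     from being starved of inlining.  */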
  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and then we
   update the estimates after each inlining.  */
static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  const char *failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_insns, max_insns;

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
          || node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n",
                 cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge),
                                        edge);
          }
    }

  max_insns = compute_max_insns (overall_insns);
  min_insns = overall_insns;
  while (overall_insns <= max_insns
         && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
    {
      int old_insns = overall_insns;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      const char *not_good = NULL;

      growth -= edge->caller->global.insns;

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i insns\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.insns);
          fprintf (dump_file,
                   " to be inlined into %s\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge),
                   edge->frequency / (double) CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file, " Called " HOST_WIDEST_INT_PRINT_DEC "x\n",
                     edge->count);
        }
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* Without profile information we don't weight the position of the
         call inside the procedure in any way.  This means that if a call
         of function A from function B seems profitable to inline, the
         recursive call of A inside the inline copy of A in B will look
         profitable too, and we would end up inlining until reaching the
         maximal function growth.  This is not a good idea, so prohibit
         such recursive inlining.

         ??? When the frequencies are taken into account we might not need
         this restriction.  */
      if (!max_count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits
                   ? N_("recursive inlining") : "");
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (!cgraph_maybe_hot_edge_p (edge))
        not_good = N_("call is unlikely and code size would grow");
      if (!flag_inline_functions
          && !DECL_DECLARED_INLINE_P (edge->callee->decl))
        not_good = N_("function not declared inline and code size would grow");
      if (optimize_size)
        not_good = N_("optimizing for size and code size would grow");
      if (not_good && growth > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed = not_good;
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         edge->inline_failed);
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         edge->inline_failed);
            }
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where))
            continue;
          update_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (CALL_CANNOT_INLINE_P (edge->call_stmt)
              || !cgraph_check_inline_limits (edge->caller, edge->callee,
                                              &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller),
                         edge->inline_failed);
              continue;
            }
          callee = edge->callee;
          cgraph_mark_inline_edge (edge, true);
          update_callee_keys (heap, callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties such as
         the number of inlinable calls and the size of the function body.
         After inlining these properties might change for the function we
         inlined into (since its body size changed) and for the functions
         called by the function we inlined into (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has %i insns,"
                   " net change of %+i insns.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.insns,
                   overall_insns - old_insns);
        }
      if (min_insns > overall_insns)
        {
          min_insns = overall_insns;
          max_insns = compute_max_insns (min_insns);

          if (dump_file)
            fprintf (dump_file, "New minimal insns reached: %i\n",
                     min_insns);
        }
    }
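  /* The edges still left in the heap no longer fit within the
     unit-growth budget; record the reason so diagnostics can report
     it.  */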
  while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = N_("--param inline-unit-growth limit reached");
    }
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}
/* Decide on the inlining.  We do so in topological order to avoid
   expenses on updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_insns = 0;
  int i;
  int initial_insns = 0;

  max_count = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed && (node->needed || node->reachable))
      {
        struct cgraph_edge *e;

        initial_insns += node->local.self_insns;
        gcc_assert (node->local.self_insns == node->global.insns);
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
      }
  overall_insns = initial_insns;
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));

  nnodes = cgraph_postorder (order);
  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with %i insns.\n",
             initial_insns);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a
     priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      struct cgraph_edge *e, *next;

      node = order[i];

      /* Handle nodes to be flattened, but don't update the overall unit
         size.  */
      if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Flattening %s\n", cgraph_node_name (node));
          cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
        }

      if (!node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file,
                 "\nConsidering %s %i insns (always inline)\n",
                 cgraph_node_name (node), node->global.insns);
      old_insns = overall_insns;
      for (e = node->callers; e; e = next)
        {
          next = e->next_caller;
          if (!e->inline_failed || CALL_CANNOT_INLINE_P (e->call_stmt))
            continue;
          if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                           &e->inline_failed))
            continue;
          cgraph_mark_inline_edge (e, true);
          if (dump_file)
            fprintf (dump_file,
                     " Inlined into %s which now has %i insns.\n",
                     cgraph_node_name (e->caller),
                     e->caller->global.insns);
        }
      /* Inlining a self-recursive function might introduce new calls to
         itself we didn't see in the loop above.  Fill in the proper
         reason why inlining failed.  */
      for (e = node->callers; e; e = e->next_caller)
        if (e->inline_failed)
          e->inline_failed = N_("recursive inlining");
      if (dump_file)
        fprintf (dump_file,
                 " Inlined for a net change of %+i insns.\n",
                 overall_insns - old_insns);
    }
  if (!flag_really_no_inline)
    cgraph_decide_inlining_of_small_functions ();

  if (!flag_really_no_inline
      && flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers && !node->callers->next_caller && !node->needed
              && node->local.inlinable && node->callers->inline_failed
              && !CALL_CANNOT_INLINE_P (node->callers->call_stmt)
              && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
            {
              if (dump_file)
                {
                  fprintf (dump_file,
                           "\nConsidering %s %i insns.\n",
                           cgraph_node_name (node), node->global.insns);
                  fprintf (dump_file,
                           " Called once from %s %i insns.\n",
                           cgraph_node_name (node->callers->caller),
                           node->callers->caller->global.insns);
                }

              old_insns = overall_insns;

              if (cgraph_check_inline_limits (node->callers->caller, node,
                                              NULL, false))
                {
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
                    fprintf (dump_file,
                             " Inlined into %s which now has %i insns"
                             " for a net change of %+i insns.\n",
                             cgraph_node_name (node->callers->caller),
                             node->callers->caller->global.insns,
                             overall_insns - old_insns);
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             " Inline limit reached, not inlined.\n");
                }
            }
        }
    }

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "%i insns turned to %i insns.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_insns,
             overall_insns);
  free (order);
  return 0;
}
/* Try to inline edge E from the incremental inliner.  MODE specifies the
   mode of the inliner.

   We detect cycles by storing the mode of the inliner into the
   cgraph_node the last time we visited it in the recursion.  In general,
   when the mode is set we have recursive inlining, but as a special case
   we want to try harder to inline ALWAYS_INLINE functions: consider the
   callgraph a->b->c->b, with a being flatten and b being always inline.
   Flattening 'a' will collapse a->b->c before hitting the cycle.  To
   accommodate always inline, we however need to inline a->b->c->b.

   So after hitting a cycle for the first time, we switch into
   ALWAYS_INLINE mode and stop inlining only after hitting an
   ALWAYS_INLINE function while already in ALWAYS_INLINE mode.  */
static bool
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
  struct cgraph_node *callee = e->callee;
  enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
  bool always_inline = e->callee->local.disregard_inline_limits;

  /* Have we hit a cycle?  */
  if (callee_mode)
    {
      /* We hit a cycle.  If we are not in ALWAYS_INLINE-only mode yet and
         the function in question is always_inline, try harder by
         switching to ALWAYS_INLINE-only mode.  */
      if (always_inline && mode != INLINE_ALWAYS_INLINE)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Hit cycle in %s, switching to always inline only.\n",
                       cgraph_node_name (callee));
            }
          mode = INLINE_ALWAYS_INLINE;
        }
      /* Otherwise it is time to give up.  */
      else
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining %s into %s to avoid cycle.\n",
                       cgraph_node_name (callee),
                       cgraph_node_name (e->caller));
            }
          e->inline_failed = (e->callee->local.disregard_inline_limits
                              ? N_("recursive inlining") : "");
          return false;
        }
    }
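  /* Remember the mode this callee is being processed in; a later visit of
     the same node during the recursion will see a non-zero mode and be
     treated as a cycle above.  The previous mode is restored on exit.  */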
  callee->aux = (void *)(size_t) mode;
  if (dump_file)
    {
      indent_to (dump_file, depth);
      fprintf (dump_file, " Inlining %s into %s.\n",
               cgraph_node_name (e->callee),
               cgraph_node_name (e->caller));
    }
  if (e->inline_failed)
    cgraph_mark_inline (e);

  /* In order to fully inline always_inline functions at -O0, we need to
     recurse here, since the inlined functions might not be processed by
     incremental inlining at all yet.

     Also flattening needs to be done recursively.  */

  if (!flag_unit_at_a_time || mode == INLINE_ALL || always_inline)
    cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
  callee->aux = (void *)(size_t) callee_mode;
  return true;
}
/* Decide on the inlining.  We do so in topological order to avoid
   expenses on updating data structures.
   DEPTH is the depth of recursion, used only for debug output.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
                                      enum inlining_mode mode,
                                      int depth)
{
  struct cgraph_edge *e;
  bool inlined = false;
  const char *failed_reason;
  enum inlining_mode old_mode;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  old_mode = (enum inlining_mode) (size_t) node->aux;

  if (mode != INLINE_ALWAYS_INLINE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
        }
      mode = INLINE_ALL;
    }

  node->aux = (void *)(size_t) mode;
  /* First of all look for always inline functions.  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->callee->local.disregard_inline_limits
          && (mode != INLINE_ALL || !e->callee->local.inlinable))
        continue;
      if (CALL_CANNOT_INLINE_P (e->call_stmt))
        continue;
      /* When the edge is already inlined, we just need to recurse into
         it in order to fully flatten the leaves.  */
      if (!e->inline_failed && mode == INLINE_ALL)
        {
          inlined |= try_inline (e, mode, depth);
          continue;
        }
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file,
                   "Considering always_inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        }
      if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file, "Not inlining: recursive call.\n");
            }
          continue;
        }
      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
          != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file, "Not inlining: SSA form does not match.\n");
            }
          continue;
        }
      if (!DECL_SAVED_TREE (e->callee->decl) && !e->callee->inline_decl)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining: Function body no longer available.\n");
            }
          continue;
        }
      inlined |= try_inline (e, mode, depth);
    }
  /* Now do the automatic inlining.  */
  if (!flag_really_no_inline && mode != INLINE_ALL
      && mode != INLINE_ALWAYS_INLINE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.inlinable
            || !e->inline_failed
            || e->callee->local.disregard_inline_limits)
          continue;
        if (dump_file)
          fprintf (dump_file, "Considering inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: SSA form does not match.\n");
              }
            continue;
          }
        /* When the function body would grow and inlining the function
           won't eliminate the need for an offline copy of the function,
           don't inline.  */
        if ((mode == INLINE_SIZE
             || (!flag_inline_functions
                 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
            && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                > e->caller->global.insns)
            && cgraph_estimate_growth (e->callee) > 0)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: code size would grow by %i insns.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
                         - e->caller->global.insns);
              }
            continue;
          }
        if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                         false)
            || CALL_CANNOT_INLINE_P (e->call_stmt))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: %s.\n", e->inline_failed);
              }
            continue;
          }
        if (!DECL_SAVED_TREE (e->callee->decl) && !e->callee->inline_decl)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        if (cgraph_default_inline_p (e->callee, &failed_reason))
          inlined |= try_inline (e, mode, depth);
        else if (!flag_unit_at_a_time)
          e->inline_failed = failed_reason;
      }
  node->aux = (void *)(size_t) old_mode;
  return inlined;
}
/* When inlining shall be performed.  */
static bool
cgraph_gate_inlining (void)
{
  return flag_inline_trees;
}

struct tree_opt_pass pass_ipa_inline =
{
  "inline",                             /* name */
  cgraph_gate_inlining,                 /* gate */
  cgraph_decide_inlining,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_remove_functions,                /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions,              /* todo_flags_finish */
  0                                     /* letter */
};
/* Because inlining might remove no-longer-reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;
/* Do inlining of small functions.  Doing so early helps profiling and
   other passes to be somewhat more effective and avoids some code
   duplication in the later real inlining pass for testcases with very
   many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;

  if (sorrycount || errorcount)
    return 0;
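  /* In unit-at-a-time mode (and at -Os) only do inlinings that are
     estimated to shrink the caller; in non-unit-at-a-time mode we may
     inline for speed, since this is the only inlining pass that runs.  */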
  if (cgraph_decide_inlining_incrementally (node,
                                            flag_unit_at_a_time || optimize_size
                                            ? INLINE_SIZE : INLINE_SPEED, 0))
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  return todo;
}
/* When inlining shall be performed.  */
static bool
cgraph_gate_early_inlining (void)
{
  return flag_inline_trees && flag_early_inlining;
}

struct tree_opt_pass pass_early_inline =
{
  "einline",                            /* name */
  cgraph_gate_early_inlining,           /* gate */
  cgraph_early_inlining,                /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};
/* When inlining shall be performed.  */
static bool
cgraph_gate_ipa_early_inlining (void)
{
  return (flag_inline_trees && flag_early_inlining
          && (flag_branch_probabilities || flag_test_coverage
              || profile_arc_flag));
}

/* IPA pass wrapper for the early inlining pass.  We need to run early
   inlining before tree profiling, so we have a standalone IPA pass for
   doing so.  */
struct tree_opt_pass pass_ipa_early_inline =
{
  "einline_ipa",                        /* name */
  cgraph_gate_ipa_early_inlining,       /* gate */
  NULL,                                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_cgraph,                     /* todo_flags_finish */
  0                                     /* letter */
};
/* Compute the parameters of functions used by the inliner.  */
static unsigned int
compute_inline_parameters (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);

  gcc_assert (!node->global.inlined_to);
  node->local.estimated_self_stack_size = estimated_stack_frame_size ();
  node->global.estimated_stack_size = node->local.estimated_self_stack_size;
  node->global.stack_frame_offset = 0;
  node->local.inlinable = tree_inlinable_function_p (current_function_decl);
  node->local.self_insns = estimate_num_insns (current_function_decl,
                                               &eni_inlining_weights);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
  if (flag_really_no_inline && !node->local.disregard_inline_limits)
    node->local.inlinable = 0;
  /* Inlining characteristics are maintained by cgraph_mark_inline.  */
  node->global.insns = node->local.self_insns;
  return 0;
}
/* When inlining shall be performed.  */
static bool
gate_inline_passes (void)
{
  return flag_inline_trees;
}

struct tree_opt_pass pass_inline_parameters =
{
  NULL,                                 /* name */
  gate_inline_passes,                   /* gate */
  compute_inline_parameters,            /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0,                                    /* todo_flags_finish */
  0                                     /* letter */
};
/* Apply the inline plan to the function.  */
static unsigned int
apply_inline (void)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;
  struct cgraph_node *node = cgraph_node (current_function_decl);

  /* Even when not optimizing, ensure that always_inline functions get
     inlined.  */
  if (!optimize)
    cgraph_decide_inlining_incrementally (node, INLINE_SPEED, 0);

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (current_function_decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    if (!e->inline_failed || warn_inline)
      break;
  if (e)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  /* In non-unit-at-a-time mode we must mark all referenced functions as
     needed.  */
  if (!flag_unit_at_a_time)
    {
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
        if (e->callee->analyzed)
          cgraph_mark_needed_node (e->callee);
    }
  return todo | execute_fixup_cfg ();
}
struct tree_opt_pass pass_apply_inline =
{
  "apply_inline",                       /* name */
  NULL,                                 /* gate */
  apply_inline,                         /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  PROP_cfg,                             /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func | TODO_verify_flow
  | TODO_verify_stmts,                  /* todo_flags_finish */
  0                                     /* letter */
};

#include "gt-ipa-inline.h"