1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Inlining decision heuristics
24 We separate inlining decisions from the inliner itself and store them
25 inside the callgraph as a so-called inline plan. Refer to the cgraph.c
26 documentation for details on how inline plans are represented in the
27 callgraph.
29 There are three major parts of this file:
31 cgraph_mark_inline implementation
33 This function marks a given call inline and performs the necessary
34 modifications of the cgraph (production of the clones and updating of
35 overall statistics).
37 inlining heuristics limits
39 These functions check that a particular inlining is allowed by the
40 limits specified by the user (allowed function growth, overall unit
41 growth and so on).
43 inlining heuristics
45 This is the implementation of the IPA pass aiming to get as much benefit
46 from inlining as possible while obeying the limits checked above.
48 The implementation of the particular heuristics is separated from
49 the rest of the code to make it easier to replace it with a more
50 complicated implementation in the future. The rest of the inlining code
51 acts as a library aimed at modifying the callgraph and verifying that
52 the limits on code size growth are respected.
54 To mark a given call inline, use the cgraph_mark_inline function; the
55 verification is performed by cgraph_default_inline_p and
56 cgraph_check_inline_limits.
58 The heuristics implement a simple knapsack-style algorithm ordering
59 all functions by their "profitability" (estimated by code size growth)
60 and inlining them in priority order.
62 cgraph_decide_inlining implements heuristics taking the whole callgraph
63 into account, while cgraph_decide_inlining_incrementally considers
64 only one function at a time and is used by the early inliner.
66 The inliner itself is split into several passes:
68 pass_inline_parameters
70 This pass computes local properties of functions that are used by the inliner:
71 estimated function body size, whether the function is inlinable at all,
72 and stack frame consumption.
74 Before executing any of the inliner passes, this local pass has to be applied
75 to each function in the callgraph (i.e. run as a subpass of some earlier
76 IPA pass). The results are made out of date by any optimization applied
77 to the function body.
79 pass_early_inlining
81 A simple local inlining pass inlining callees into the current function.
82 This pass makes no global whole-compilation-unit analysis, and thus when it
83 is allowed to do inlining that expands code size, it might result in
84 unbounded growth of the whole unit.
86 The pass is run during conversion into SSA form. Only functions already
87 converted into SSA form are inlined, so the conversion must happen in
88 topological order on the callgraph (which is maintained by the pass manager).
89 Functions are early-optimized after inlining, so the early inliner sees the
90 unoptimized function itself, but all considered callees are already
91 optimized, allowing it to unfold the abstraction penalty of C++ effectively
92 and cheaply.
94 pass_ipa_early_inlining
96 With profiling, the early inlining is also necessary to reduce
97 instrumentation costs on programs with a high abstraction penalty (doing
98 many redundant calls). This can't happen in parallel with early
99 optimization and profile instrumentation, because we would end up
100 re-instrumenting already instrumented function bodies we brought in via
101 inlining.
103 To avoid this, this pass is executed as an IPA pass before profiling. It is
104 a simple wrapper around pass_early_inlining and ensures that inlining happens first.
106 pass_ipa_inline
108 This is the main pass, implementing a simple greedy algorithm for inlining
109 of small functions that results in overall growth of the compilation unit and
110 inlining of functions called once. The pass computes just the so-called inline
111 plan (a representation of the inlining to be done in the callgraph) and, unlike
112 early inlining, it does not perform the inlining itself.
114 pass_apply_inline
116 This pass performs the actual inlining according to pass_ipa_inline on a given
117 function. Possibly the function body before inlining is saved when it is
118 needed for further inlining later.
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "tm.h"
125 #include "tree.h"
126 #include "tree-inline.h"
127 #include "langhooks.h"
128 #include "flags.h"
129 #include "cgraph.h"
130 #include "diagnostic.h"
131 #include "gimple-pretty-print.h"
132 #include "timevar.h"
133 #include "params.h"
134 #include "fibheap.h"
135 #include "intl.h"
136 #include "tree-pass.h"
137 #include "hashtab.h"
138 #include "coverage.h"
139 #include "ggc.h"
140 #include "tree-flow.h"
141 #include "rtl.h"
142 #include "ipa-prop.h"
143 #include "except.h"
145 #define MAX_TIME 1000000000
147 /* Modes the incremental inliner operates on:
149 In ALWAYS_INLINE mode, only functions marked
150 always_inline are inlined. This mode is used after detecting a cycle during
151 flattening.
153 In SIZE mode, only functions that reduce the function body size after inlining
154 are inlined; this is used during early inlining.
156 In ALL mode, everything is inlined. This is used during flattening. */
157 enum inlining_mode {
158 INLINE_NONE = 0,
159 INLINE_ALWAYS_INLINE,
160 INLINE_SIZE_NORECURSIVE,
161 INLINE_SIZE,
162 INLINE_ALL
165 static bool
166 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode);
167 static void cgraph_flatten (struct cgraph_node *node);
170 /* Statistics we collect about inlining algorithm. */
171 static int ncalls_inlined;
172 static int nfunctions_inlined;
173 static int overall_size;
174 static gcov_type max_count, max_benefit;
176 /* Holders of ipa cgraph hooks: */
177 static struct cgraph_node_hook_list *function_insertion_hook_holder;
179 static inline struct inline_summary *
180 inline_summary (struct cgraph_node *node)
182 return &node->local.inline_summary;
185 /* Estimate self time of the function after inlining WHAT into TO. */
187 static int
188 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
189 struct cgraph_node *what)
191 gcov_type time = (((gcov_type)what->global.time
192 - inline_summary (what)->time_inlining_benefit)
193 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
194 + to->global.time;
195 if (time < 0)
196 time = 0;
197 if (time > MAX_TIME)
198 time = MAX_TIME;
199 return time;
202 /* Estimate the self size of the function after inlining WHAT into TO. */
204 static inline int
205 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
206 struct cgraph_node *what)
208 int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
209 * times + to->global.size);
210 gcc_assert (size >= 0);
211 return size;
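/* Illustrative example (hypothetical numbers): if WHAT has a global size of 40
   and a size_inlining_benefit of 10, inlining it into TO (global size 100) at
   TIMES == 3 call sites is estimated as (40 - 10) * 3 + 100 = 190 size units.  */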
214 /* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
215 by NEST. */
217 static void
218 update_noncloned_frequencies (struct cgraph_node *node,
219 int freq_scale, int nest)
221 struct cgraph_edge *e;
223 /* We do not want to ignore high loop nest after freq drops to 0. */
224 if (!freq_scale)
225 freq_scale = 1;
226 for (e = node->callees; e; e = e->next_callee)
228 e->loop_nest += nest;
229 e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
230 if (e->frequency > CGRAPH_FREQ_MAX)
231 e->frequency = CGRAPH_FREQ_MAX;
232 if (!e->inline_failed)
233 update_noncloned_frequencies (e->callee, freq_scale, nest);
237 /* E is expected to be an edge being inlined. Clone destination node of
238 the edge and redirect it to the new clone.
239 DUPLICATE is used for bookkeeping on whether we are actually creating new
240 clones or re-using node originally representing out-of-line function call.
242 void
243 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
244 bool update_original)
246 HOST_WIDE_INT peak;
248 if (duplicate)
250 /* We may eliminate the need for out-of-line copy to be output.
251 In that case just go ahead and re-use it. */
252 if (!e->callee->callers->next_caller
253 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
254 /* Don't reuse if more than one function shares a comdat group.
255 If the other function(s) are needed, we need to emit even
256 this function out of line. */
257 && !e->callee->same_comdat_group
258 && !cgraph_new_nodes)
260 gcc_assert (!e->callee->global.inlined_to);
261 if (e->callee->analyzed)
263 overall_size -= e->callee->global.size;
264 nfunctions_inlined++;
266 duplicate = false;
267 e->callee->local.externally_visible = false;
268 update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
270 else
272 struct cgraph_node *n;
273 n = cgraph_clone_node (e->callee, e->callee->decl,
274 e->count, e->frequency, e->loop_nest,
275 update_original, NULL);
276 cgraph_redirect_edge_callee (e, n);
280 if (e->caller->global.inlined_to)
281 e->callee->global.inlined_to = e->caller->global.inlined_to;
282 else
283 e->callee->global.inlined_to = e->caller;
284 e->callee->global.stack_frame_offset
285 = e->caller->global.stack_frame_offset
286 + inline_summary (e->caller)->estimated_self_stack_size;
287 peak = e->callee->global.stack_frame_offset
288 + inline_summary (e->callee)->estimated_self_stack_size;
289 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
290 e->callee->global.inlined_to->global.estimated_stack_size = peak;
291 cgraph_propagate_frequency (e->callee);
293 /* Recursively clone all bodies. */
294 for (e = e->callee->callees; e; e = e->next_callee)
295 if (!e->inline_failed)
296 cgraph_clone_inlined_nodes (e, duplicate, update_original);
299 /* Mark edge E as inlined and update callgraph accordingly. UPDATE_ORIGINAL
300 specifies whether the profile of the original function should be updated. If any new
301 indirect edges are discovered in the process, add them to NEW_EDGES, unless
302 it is NULL. Return true iff any new callgraph edges were discovered as a
303 result of inlining. */
305 static bool
306 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
307 VEC (cgraph_edge_p, heap) **new_edges)
309 int old_size = 0, new_size = 0;
310 struct cgraph_node *to = NULL, *what;
311 struct cgraph_edge *curr = e;
312 int freq;
314 gcc_assert (e->inline_failed);
315 e->inline_failed = CIF_OK;
316 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
318 cgraph_clone_inlined_nodes (e, true, update_original);
320 what = e->callee;
322 freq = e->frequency;
323 /* Now update size of caller and all functions caller is inlined into. */
324 for (;e && !e->inline_failed; e = e->caller->callers)
326 to = e->caller;
327 old_size = e->caller->global.size;
328 new_size = cgraph_estimate_size_after_inlining (1, to, what);
329 to->global.size = new_size;
330 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
332 gcc_assert (what->global.inlined_to == to);
333 if (new_size > old_size)
334 overall_size += new_size - old_size;
335 ncalls_inlined++;
337 /* FIXME: We should remove the optimize check after we ensure we never run
338 IPA passes when not optimizing. */
339 if (flag_indirect_inlining && optimize)
340 return ipa_propagate_indirect_call_infos (curr, new_edges);
341 else
342 return false;
345 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER. */
347 static void
348 cgraph_mark_inline (struct cgraph_edge *edge)
350 struct cgraph_node *to = edge->caller;
351 struct cgraph_node *what = edge->callee;
352 struct cgraph_edge *e, *next;
354 gcc_assert (!edge->call_stmt_cannot_inline_p);
355 /* Look for all calls, mark them inline and clone recursively
356 all inlined functions. */
357 for (e = what->callers; e; e = next)
359 next = e->next_caller;
360 if (e->caller == to && e->inline_failed)
362 cgraph_mark_inline_edge (e, true, NULL);
363 if (e == edge)
364 edge = next;
369 /* Estimate the growth caused by inlining NODE into all callees. */
371 static int
372 cgraph_estimate_growth (struct cgraph_node *node)
374 int growth = 0;
375 struct cgraph_edge *e;
376 bool self_recursive = false;
378 if (node->global.estimated_growth != INT_MIN)
379 return node->global.estimated_growth;
381 for (e = node->callers; e; e = e->next_caller)
383 if (e->caller == node)
384 self_recursive = true;
385 if (e->inline_failed)
386 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
387 - e->caller->global.size);
390 /* ??? Wrong for non-trivially self-recursive functions or cases where
391 we decide not to inline for different reasons, but it is not a big deal
392 as in that case we will keep the body around, but we will also avoid
393 some inlining. */
394 if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
395 && !DECL_EXTERNAL (node->decl) && !self_recursive)
396 growth -= node->global.size;
398 node->global.estimated_growth = growth;
399 return growth;
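/* Worked example (hypothetical numbers): a function of size 30 with a
   size_inlining_benefit of 5 and two not-yet-inlined callers adds
   30 - 5 = 25 to each caller, so growth is 50; if inlining both calls would
   let the offline copy be removed, its own size of 30 is subtracted, giving
   an estimated overall growth of 20.  */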
402 /* Return false when inlining WHAT into TO is not a good idea
403 as it would cause too large a growth of function bodies.
404 When ONE_ONLY is true, assume that only one call site is going
405 to be inlined; otherwise figure out how many call sites in
406 TO call WHAT and verify that all can be inlined.
409 static bool
410 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
411 cgraph_inline_failed_t *reason, bool one_only)
413 int times = 0;
414 struct cgraph_edge *e;
415 int newsize;
416 int limit;
417 HOST_WIDE_INT stack_size_limit, inlined_stack;
419 if (one_only)
420 times = 1;
421 else
422 for (e = to->callees; e; e = e->next_callee)
423 if (e->callee == what)
424 times++;
426 if (to->global.inlined_to)
427 to = to->global.inlined_to;
429 /* When inlining a large function body called once into a small function,
430 take the inlined function as the base for limiting the growth. */
431 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
432 limit = inline_summary (to)->self_size;
433 else
434 limit = inline_summary (what)->self_size;
436 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
438 /* Check the size after inlining against the function limits. But allow
439 the function to shrink if it went over the limits by forced inlining. */
440 newsize = cgraph_estimate_size_after_inlining (times, to, what);
441 if (newsize >= to->global.size
442 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
443 && newsize > limit)
445 if (reason)
446 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
447 return false;
450 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
452 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
454 inlined_stack = (to->global.stack_frame_offset
455 + inline_summary (to)->estimated_self_stack_size
456 + what->global.estimated_stack_size);
457 if (inlined_stack > stack_size_limit
458 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
460 if (reason)
461 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
462 return false;
464 return true;
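/* A sketch of the size check above with hypothetical numbers: with
   --param large-function-growth=100, a caller of self_size 500 inlining a
   callee of self_size 200 gets limit = 500 + 500 * 100 / 100 = 1000; the
   inlining is rejected only when the estimated new size reaches the caller's
   current size, exceeds --param large-function-insns and exceeds that limit.  */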
467 /* Return true when function N is small enough to be inlined. */
469 static bool
470 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
472 tree decl = n->decl;
474 if (n->local.disregard_inline_limits)
475 return true;
477 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
479 if (reason)
480 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
481 return false;
484 if (!n->analyzed)
486 if (reason)
487 *reason = CIF_BODY_NOT_AVAILABLE;
488 return false;
491 if (DECL_DECLARED_INLINE_P (decl))
493 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
495 if (reason)
496 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
497 return false;
500 else
502 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
504 if (reason)
505 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
506 return false;
510 return true;
513 /* Return true when inlining WHAT would create recursive inlining.
514 We call recursive inlining all cases where the same function appears more than
515 once in a single recursion nest path in the inline graph. */
517 static inline bool
518 cgraph_recursive_inlining_p (struct cgraph_node *to,
519 struct cgraph_node *what,
520 cgraph_inline_failed_t *reason)
522 bool recursive;
523 if (to->global.inlined_to)
524 recursive = what->decl == to->global.inlined_to->decl;
525 else
526 recursive = what->decl == to->decl;
527 /* Marking a recursive function inline has sane semantics and thus we should
528 not warn about it. */
529 if (recursive && reason)
530 *reason = (what->local.disregard_inline_limits
531 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
532 return recursive;
535 /* A cost model driving the inlining heuristics in such a way that the edges
536 with the smallest badness are inlined first. After each inlining is performed,
537 the costs of all caller edges of the affected nodes are recomputed so the
538 metrics may accurately depend on values such as the number of inlinable callers
539 of the function or the function body size. */
541 static int
542 cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
544 gcov_type badness;
545 int growth =
546 (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
547 - edge->caller->global.size);
549 if (edge->callee->local.disregard_inline_limits)
550 return INT_MIN;
552 if (dump)
554 fprintf (dump_file, " Badness calculation for %s -> %s\n",
555 cgraph_node_name (edge->caller),
556 cgraph_node_name (edge->callee));
557 fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
558 growth,
559 edge->callee->global.time,
560 inline_summary (edge->callee)->time_inlining_benefit,
561 edge->callee->global.size,
562 inline_summary (edge->callee)->size_inlining_benefit);
565 /* Always prefer inlining saving code size. */
566 if (growth <= 0)
568 badness = INT_MIN - growth;
569 if (dump)
570 fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
571 growth);
574 /* When profiling is available, base priorities on -(#calls / growth),
575 so we optimize for the overall number of "executed" inlined calls. */
576 else if (max_count)
578 badness =
579 ((int)
580 ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
581 (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
582 if (dump)
584 fprintf (dump_file,
585 " %i (relative %f): profile info. Relative count %f"
586 " * Relative benefit %f\n",
587 (int) badness, (double) badness / INT_MIN,
588 (double) edge->count / max_count,
589 (double) (inline_summary (edge->callee)->
590 time_inlining_benefit + 1) / (max_benefit + 1));
594 /* When a function-local profile is available, base priorities on
595 growth / frequency, so we optimize for the overall frequency of inlined
596 calls. This is not too accurate since while the call might be frequent
597 within the function, the function itself may be infrequent.
599 Another objective to optimize for is the number of different calls inlined.
600 We add the estimated growth after inlining all functions to bias the
601 priorities slightly in this direction (so functions of the same size that
602 are called fewer times get priority). */
603 else if (flag_guess_branch_prob)
605 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
606 int benefitperc;
607 int growth_for_all;
608 badness = growth * 10000;
609 benefitperc =
610 MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
611 (edge->callee->global.time + 1) +1, 100);
612 div *= benefitperc;
615 /* Decrease badness if call is nested. */
616 /* Compress the range so we don't overflow. */
617 if (div > 10000)
618 div = 10000 + ceil_log2 (div) - 8;
619 if (div < 1)
620 div = 1;
621 if (badness > 0)
622 badness /= div;
623 growth_for_all = cgraph_estimate_growth (edge->callee);
624 badness += growth_for_all;
625 if (badness > INT_MAX)
626 badness = INT_MAX;
627 if (dump)
629 fprintf (dump_file,
630 " %i: guessed profile. frequency %i, overall growth %i,"
631 " benefit %i%%, divisor %i\n",
632 (int) badness, edge->frequency, growth_for_all, benefitperc, div);
635 /* When a function-local profile is not available or it does not give
636 useful information (i.e. frequency is zero), base the cost on
637 loop nest and overall size growth, so we optimize for the overall number
638 of functions fully inlined in the program. */
639 else
641 int nest = MIN (edge->loop_nest, 8);
642 badness = cgraph_estimate_growth (edge->callee) * 256;
644 /* Decrease badness if call is nested. */
645 if (badness > 0)
646 badness >>= nest;
647 else
649 badness <<= nest;
651 if (dump)
652 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
653 nest);
656 /* Ensure that we did not overflow in all the fixed point math above. */
657 gcc_assert (badness >= INT_MIN);
658 gcc_assert (badness <= INT_MAX - 1);
659 /* Make recursive inlining happen always after other inlining is done. */
660 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
661 return badness + 1;
662 else
663 return badness;
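/* Illustrative example for the guessed-profile branch above (hypothetical
   numbers): growth of 20, an edge frequency equal to CGRAPH_FREQ_BASE and a
   time benefit of roughly 10% give div = (100 + 1) * 11 = 1111, so
   badness = 20 * 10000 / 1111 = 180 before the overall-growth bias is added;
   edges with smaller badness are inlined first.  */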
666 /* Recompute badness of EDGE and update its key in HEAP if needed. */
667 static void
668 update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
670 int badness = cgraph_edge_badness (edge, false);
671 if (edge->aux)
673 fibnode_t n = (fibnode_t) edge->aux;
674 gcc_checking_assert (n->data == edge);
676 /* fibheap_replace_key only decreases the keys.
677 When we increase the key we do not update the heap
678 and instead re-insert the element once it becomes
679 the minimum of the heap. */
680 if (badness < n->key)
682 fibheap_replace_key (heap, n, badness);
683 gcc_checking_assert (n->key == badness);
686 else
687 edge->aux = fibheap_insert (heap, badness, edge);
690 /* Recompute heap nodes for each caller edge. */
692 static void
693 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
694 bitmap updated_nodes)
696 struct cgraph_edge *edge;
697 cgraph_inline_failed_t failed_reason;
699 if (!node->local.inlinable
700 || node->global.inlined_to)
701 return;
702 if (bitmap_bit_p (updated_nodes, node->uid))
703 return;
704 bitmap_set_bit (updated_nodes, node->uid);
705 node->global.estimated_growth = INT_MIN;
707 /* See if there is something to do. */
708 for (edge = node->callers; edge; edge = edge->next_caller)
709 if (edge->inline_failed)
710 break;
711 if (!edge)
712 return;
713 /* Prune out edges we won't inline into anymore. */
714 if (!cgraph_default_inline_p (node, &failed_reason))
716 for (; edge; edge = edge->next_caller)
717 if (edge->aux)
719 fibheap_delete_node (heap, (fibnode_t) edge->aux);
720 edge->aux = NULL;
721 if (edge->inline_failed)
722 edge->inline_failed = failed_reason;
724 return;
727 for (; edge; edge = edge->next_caller)
728 if (edge->inline_failed)
729 update_edge_key (heap, edge);
732 /* Recompute heap nodes for each uninlined call.
733 This is used when we know that edge badnesses are only going to increase
734 (we introduced a new call site) and thus all we need to do is insert the
735 newly created edges into the heap. */
737 static void
738 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
739 bitmap updated_nodes)
741 struct cgraph_edge *e = node->callees;
742 node->global.estimated_growth = INT_MIN;
744 if (!e)
745 return;
746 while (true)
747 if (!e->inline_failed && e->callee->callees)
748 e = e->callee->callees;
749 else
751 if (e->inline_failed
752 && e->callee->local.inlinable
753 && !bitmap_bit_p (updated_nodes, e->callee->uid))
755 node->global.estimated_growth = INT_MIN;
756 /* If function becomes uninlinable, we need to remove it from the heap. */
757 if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
758 update_caller_keys (heap, e->callee, updated_nodes);
759 else
760 /* Otherwise update just edge E. */
761 update_edge_key (heap, e);
763 if (e->next_callee)
764 e = e->next_callee;
765 else
769 if (e->caller == node)
770 return;
771 e = e->caller->callers;
773 while (!e->next_callee);
774 e = e->next_callee;
779 /* Recompute heap nodes for the caller edges of each of the callees.
780 Walk recursively into all inline clones. */
782 static void
783 update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
784 bitmap updated_nodes)
786 struct cgraph_edge *e = node->callees;
787 node->global.estimated_growth = INT_MIN;
789 if (!e)
790 return;
791 while (true)
792 if (!e->inline_failed && e->callee->callees)
793 e = e->callee->callees;
794 else
796 if (e->inline_failed)
797 update_caller_keys (heap, e->callee, updated_nodes);
798 if (e->next_callee)
799 e = e->next_callee;
800 else
804 if (e->caller == node)
805 return;
806 e = e->caller->callers;
808 while (!e->next_callee);
809 e = e->next_callee;
814 /* Enqueue all recursive calls from NODE into the priority queue, ordered by
815 how much we want to recursively inline the call. */
817 static void
818 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
819 fibheap_t heap)
821 static int priority;
822 struct cgraph_edge *e;
823 for (e = where->callees; e; e = e->next_callee)
824 if (e->callee == node)
826 /* When profile feedback is available, prioritize by the expected number
827 of calls. Without profile feedback we maintain a simple queue
828 to order candidates by recursion depth. */
829 fibheap_insert (heap,
830 !max_count ? priority++
831 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
834 for (e = where->callees; e; e = e->next_callee)
835 if (!e->inline_failed)
836 lookup_recursive_calls (node, e->callee, heap);
839 /* Decide on recursive inlining: in case the function has recursive calls,
840 inline until the body size reaches the given limit. If any new indirect edges
841 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
842 is NULL. */
844 static bool
845 cgraph_decide_recursive_inlining (struct cgraph_node *node,
846 VEC (cgraph_edge_p, heap) **new_edges)
848 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
849 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
850 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
851 fibheap_t heap;
852 struct cgraph_edge *e;
853 struct cgraph_node *master_clone, *next;
854 int depth = 0;
855 int n = 0;
857 /* It does not make sense to recursively inline always-inline functions
858 as we are going to sorry() on the remaining calls anyway. */
859 if (node->local.disregard_inline_limits
860 && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
861 return false;
863 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
864 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
865 return false;
867 if (DECL_DECLARED_INLINE_P (node->decl))
869 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
870 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
873 /* Make sure that function is small enough to be considered for inlining. */
874 if (!max_depth
875 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
876 return false;
877 heap = fibheap_new ();
878 lookup_recursive_calls (node, node, heap);
879 if (fibheap_empty (heap))
881 fibheap_delete (heap);
882 return false;
885 if (dump_file)
886 fprintf (dump_file,
887 " Performing recursive inlining on %s\n",
888 cgraph_node_name (node));
890 /* We need original clone to copy around. */
891 master_clone = cgraph_clone_node (node, node->decl,
892 node->count, CGRAPH_FREQ_BASE, 1,
893 false, NULL);
894 master_clone->needed = true;
895 for (e = master_clone->callees; e; e = e->next_callee)
896 if (!e->inline_failed)
897 cgraph_clone_inlined_nodes (e, true, false);
899 /* Do the inlining and update the list of recursive calls during the process. */
900 while (!fibheap_empty (heap)
901 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
902 <= limit))
904 struct cgraph_edge *curr
905 = (struct cgraph_edge *) fibheap_extract_min (heap);
906 struct cgraph_node *cnode;
908 depth = 1;
909 for (cnode = curr->caller;
910 cnode->global.inlined_to; cnode = cnode->callers->caller)
911 if (node->decl == curr->callee->decl)
912 depth++;
913 if (depth > max_depth)
915 if (dump_file)
916 fprintf (dump_file,
917 " maximal depth reached\n");
918 continue;
921 if (max_count)
923 if (!cgraph_maybe_hot_edge_p (curr))
925 if (dump_file)
926 fprintf (dump_file, " Not inlining cold call\n");
927 continue;
929 if (curr->count * 100 / node->count < probability)
931 if (dump_file)
932 fprintf (dump_file,
933 " Probability of edge is too small\n");
934 continue;
938 if (dump_file)
940 fprintf (dump_file,
941 " Inlining call of depth %i", depth);
942 if (node->count)
944 fprintf (dump_file, " called approx. %.2f times per call",
945 (double)curr->count / node->count);
947 fprintf (dump_file, "\n");
949 cgraph_redirect_edge_callee (curr, master_clone);
950 cgraph_mark_inline_edge (curr, false, new_edges);
951 lookup_recursive_calls (node, curr->callee, heap);
952 n++;
954 if (!fibheap_empty (heap) && dump_file)
955 fprintf (dump_file, " Recursive inlining growth limit met.\n");
957 fibheap_delete (heap);
958 if (dump_file)
959 fprintf (dump_file,
960 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
961 master_clone->global.size, node->global.size,
962 master_clone->global.time, node->global.time);
964 /* Remove the master clone we used for inlining. We rely on the fact that
965 clones inlined into the master clone get queued just before the master
966 clone, so we don't need recursion. */
967 for (node = cgraph_nodes; node != master_clone;
968 node = next)
970 next = node->next;
971 if (node->global.inlined_to == master_clone)
972 cgraph_remove_node (node);
974 cgraph_remove_node (master_clone);
975 /* FIXME: Recursive inlining actually reduces the number of calls to the
976 function. At this place we should probably walk the function and
977 inline clones and compensate the counts accordingly. This probably
978 doesn't matter much in practice. */
979 return n > 0;
982 /* Set inline_failed for all callers of given function to REASON. */
984 static void
985 cgraph_set_inline_failed (struct cgraph_node *node,
986 cgraph_inline_failed_t reason)
988 struct cgraph_edge *e;
990 if (dump_file)
991 fprintf (dump_file, "Inlining failed: %s\n",
992 cgraph_inline_failed_string (reason));
993 for (e = node->callers; e; e = e->next_caller)
994 if (e->inline_failed)
995 e->inline_failed = reason;
998 /* Given the whole-compilation-unit size estimate INSNS, compute how large
999 we can allow the unit to grow. */
1000 static int
1001 compute_max_insns (int insns)
1003 int max_insns = insns;
1004 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1005 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1007 return ((HOST_WIDEST_INT) max_insns
1008 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
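/* Hypothetical example: with an overall estimate of 20000 insns, a
   --param large-unit-insns value below that, and --param inline-unit-growth=30,
   the unit is allowed to grow to 20000 * (100 + 30) / 100 = 26000 estimated
   instructions.  */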
1011 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1012 static void
1013 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
1015 while (VEC_length (cgraph_edge_p, new_edges) > 0)
1017 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
1019 gcc_assert (!edge->aux);
1020 if (edge->callee->local.inlinable
1021 && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1022 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1027 /* We use a greedy algorithm for inlining of small functions:
1028 all inline candidates are put into a prioritized heap based on the estimated
1029 growth of the overall number of instructions, and then we update the estimates.
1031 INLINED and INLINED_CALLEES are just pointers to arrays large enough
1032 to be passed to cgraph_inlined_into and cgraph_inlined_callees. */
1034 static void
1035 cgraph_decide_inlining_of_small_functions (void)
1037 struct cgraph_node *node;
1038 struct cgraph_edge *edge;
1039 cgraph_inline_failed_t failed_reason;
1040 fibheap_t heap = fibheap_new ();
1041 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1042 int min_size, max_size;
1043 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
1045 if (flag_indirect_inlining)
1046 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
1048 if (dump_file)
1049 fprintf (dump_file, "\nDeciding on smaller functions:\n");
1051 /* Put all inline candidates into the heap. */
1053 for (node = cgraph_nodes; node; node = node->next)
1055 if (!node->local.inlinable || !node->callers)
1056 continue;
1057 if (dump_file)
1058 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
1060 node->global.estimated_growth = INT_MIN;
1061 if (!cgraph_default_inline_p (node, &failed_reason))
1063 cgraph_set_inline_failed (node, failed_reason);
1064 continue;
1067 for (edge = node->callers; edge; edge = edge->next_caller)
1068 if (edge->inline_failed)
1070 gcc_assert (!edge->aux);
1071 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1075 max_size = compute_max_insns (overall_size);
1076 min_size = overall_size;
1078 while (overall_size <= max_size
1079 && !fibheap_empty (heap))
1081 int old_size = overall_size;
1082 struct cgraph_node *where, *callee;
1083 int badness = fibheap_min_key (heap);
1084 int current_badness;
1085 int growth;
1086 cgraph_inline_failed_t not_good = CIF_OK;
1088 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1089 gcc_assert (edge->aux);
1090 edge->aux = NULL;
1091 if (!edge->inline_failed)
1092 continue;
1094 /* When updating the edge costs, we only decrease badness in the keys.
1095 When the badness increases, we keep the heap as it is and re-insert
1096 the key now. */
1097 current_badness = cgraph_edge_badness (edge, false);
1098 gcc_assert (current_badness >= badness);
1099 if (current_badness != badness)
1101 edge->aux = fibheap_insert (heap, current_badness, edge);
1102 continue;
1105 callee = edge->callee;
1107 growth = (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
1108 - edge->caller->global.size);
1110 if (dump_file)
1112 fprintf (dump_file,
1113 "\nConsidering %s with %i size\n",
1114 cgraph_node_name (edge->callee),
1115 edge->callee->global.size);
1116 fprintf (dump_file,
1117 " to be inlined into %s in %s:%i\n"
1118 " Estimated growth after inlined into all callees is %+i insns.\n"
1119 " Estimated badness is %i, frequency %.2f.\n",
1120 cgraph_node_name (edge->caller),
1121 flag_wpa ? "unknown"
1122 : gimple_filename ((const_gimple) edge->call_stmt),
1123 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1124 cgraph_estimate_growth (edge->callee),
1125 badness,
1126 edge->frequency / (double)CGRAPH_FREQ_BASE);
1127 if (edge->count)
1128 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1129 if (dump_flags & TDF_DETAILS)
1130 cgraph_edge_badness (edge, true);
1133 /* When we do not have profile info ready, we don't weight in any way the
1134 position of the call in the procedure itself. This means that if a call of
1135 function A from function B seems profitable to inline, the recursive
1136 call of function A in the inline copy of A in B will look profitable too,
1137 and we end up inlining until reaching the maximal function growth. This
1138 is not a good idea, so prohibit the recursive inlining.
1140 ??? When the frequencies are taken into account we might not need this
1141 restriction.
1143 We need to be careful here; in some testcases, e.g. directives.c in
1144 libcpp, we can estimate a self-recursive function to have negative growth
1145 for inlining completely.
1147 if (!edge->count)
1149 where = edge->caller;
1150 while (where->global.inlined_to)
1152 if (where->decl == edge->callee->decl)
1153 break;
1154 where = where->callers->caller;
1156 if (where->global.inlined_to)
1158 edge->inline_failed
1159 = (edge->callee->local.disregard_inline_limits
1160 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1161 if (dump_file)
1162 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
1163 continue;
1167 if (edge->callee->local.disregard_inline_limits)
1169 else if (!cgraph_maybe_hot_edge_p (edge))
1170 not_good = CIF_UNLIKELY_CALL;
1171 else if (!flag_inline_functions
1172 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
1173 not_good = CIF_NOT_DECLARED_INLINED;
1174 else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
1175 not_good = CIF_OPTIMIZING_FOR_SIZE;
1176 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
1178 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1179 &edge->inline_failed))
1181 edge->inline_failed = not_good;
1182 if (dump_file)
1183 fprintf (dump_file, " inline_failed:%s.\n",
1184 cgraph_inline_failed_string (edge->inline_failed));
1186 continue;
1188 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1190 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1191 &edge->inline_failed))
1193 if (dump_file)
1194 fprintf (dump_file, " inline_failed:%s.\n",
1195 cgraph_inline_failed_string (edge->inline_failed));
1197 continue;
1199 if (!tree_can_inline_p (edge))
1201 if (dump_file)
1202 fprintf (dump_file, " inline_failed:%s.\n",
1203 cgraph_inline_failed_string (edge->inline_failed));
1204 continue;
1206 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1207 &edge->inline_failed))
1209 where = edge->caller;
1210 if (where->global.inlined_to)
1211 where = where->global.inlined_to;
1212 if (!cgraph_decide_recursive_inlining (where,
1213 flag_indirect_inlining
1214 ? &new_indirect_edges : NULL))
1215 continue;
1216 if (flag_indirect_inlining)
1217 add_new_edges_to_heap (heap, new_indirect_edges);
1218 update_all_callee_keys (heap, where, updated_nodes);
1220 else
1222 struct cgraph_node *callee;
1223 if (edge->call_stmt_cannot_inline_p
1224 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1225 &edge->inline_failed, true))
1227 if (dump_file)
1228 fprintf (dump_file, " Not inlining into %s:%s.\n",
1229 cgraph_node_name (edge->caller),
1230 cgraph_inline_failed_string (edge->inline_failed));
1231 continue;
1233 callee = edge->callee;
1234 gcc_checking_assert (!callee->global.inlined_to);
1235 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1236 if (flag_indirect_inlining)
1237 add_new_edges_to_heap (heap, new_indirect_edges);
1239 /* We inlined the last offline copy into the body. This might lead
1240 to callees of the function having fewer call sites and thus they
1241 may need updating. */
1242 if (callee->global.inlined_to)
1243 update_all_callee_keys (heap, callee, updated_nodes);
1244 else
1245 update_callee_keys (heap, edge->callee, updated_nodes);
1247 where = edge->caller;
1248 if (where->global.inlined_to)
1249 where = where->global.inlined_to;
1251 /* Our profitability metric can depend on local properties
1252 such as the number of inlinable calls and the size of the function body.
1253 After inlining these properties might change for the function we
1254 inlined into (since its body size changed) and for the functions
1255 called by the function we inlined (since the number of their inlinable
1256 callers might change). */
1257 update_caller_keys (heap, where, updated_nodes);
1259 /* We removed one call of the function we just inlined. If offline
1260 copy is still needed, be sure to update the keys. */
1261 if (callee != where && !callee->global.inlined_to)
1262 update_caller_keys (heap, callee, updated_nodes);
1263 bitmap_clear (updated_nodes);
1265 if (dump_file)
1267 fprintf (dump_file,
1268 " Inlined into %s which now has size %i and self time %i,"
1269 "net change of %+i.\n",
1270 cgraph_node_name (edge->caller),
1271 edge->caller->global.time,
1272 edge->caller->global.size,
1273 overall_size - old_size);
1275 if (min_size > overall_size)
1277 min_size = overall_size;
1278 max_size = compute_max_insns (min_size);
1280 if (dump_file)
1281 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1284 while (!fibheap_empty (heap))
1286 int badness = fibheap_min_key (heap);
1288 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1289 gcc_assert (edge->aux);
1290 edge->aux = NULL;
1291 if (!edge->inline_failed)
1292 continue;
1293 #ifdef ENABLE_CHECKING
1294 gcc_assert (cgraph_edge_badness (edge, false) >= badness);
1295 #endif
1296 if (dump_file)
1298 fprintf (dump_file,
1299 "\nSkipping %s with %i size\n",
1300 cgraph_node_name (edge->callee),
1301 edge->callee->global.size);
1302 fprintf (dump_file,
1303 " called by %s in %s:%i\n"
1304 " Estimated growth after inlined into all callees is %+i insns.\n"
1305 " Estimated badness is %i, frequency %.2f.\n",
1306 cgraph_node_name (edge->caller),
1307 flag_wpa ? "unknown"
1308 : gimple_filename ((const_gimple) edge->call_stmt),
1309 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1310 cgraph_estimate_growth (edge->callee),
1311 badness,
1312 edge->frequency / (double)CGRAPH_FREQ_BASE);
1313 if (edge->count)
1314 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1315 if (dump_flags & TDF_DETAILS)
1316 cgraph_edge_badness (edge, true);
1318 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1319 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1320 &edge->inline_failed))
1321 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1324 if (new_indirect_edges)
1325 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1326 fibheap_delete (heap);
1327 BITMAP_FREE (updated_nodes);
1330 /* Flatten NODE from the IPA inliner. */
1332 static void
1333 cgraph_flatten (struct cgraph_node *node)
1335 struct cgraph_edge *e;
1337 /* We shouldn't be called recursively when we are being processed. */
1338 gcc_assert (node->aux == NULL);
1340 node->aux = (void *)(size_t) INLINE_ALL;
1342 for (e = node->callees; e; e = e->next_callee)
1344 struct cgraph_node *orig_callee;
1346 if (e->call_stmt_cannot_inline_p)
1347 continue;
1349 if (!e->callee->analyzed)
1351 if (dump_file)
1352 fprintf (dump_file,
1353 "Not inlining: Function body not available.\n");
1354 continue;
1357 /* We've hit cycle? It is time to give up. */
1358 if (e->callee->aux)
1360 if (dump_file)
1361 fprintf (dump_file,
1362 "Not inlining %s into %s to avoid cycle.\n",
1363 cgraph_node_name (e->callee),
1364 cgraph_node_name (e->caller));
1365 e->inline_failed = CIF_RECURSIVE_INLINING;
1366 continue;
1369 /* When the edge is already inlined, we just need to recurse into
1370 it in order to fully flatten the leaves. */
1371 if (!e->inline_failed)
1373 cgraph_flatten (e->callee);
1374 continue;
1377 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1379 if (dump_file)
1380 fprintf (dump_file, "Not inlining: recursive call.\n");
1381 continue;
1384 if (!tree_can_inline_p (e))
1386 if (dump_file)
1387 fprintf (dump_file, "Not inlining: %s",
1388 cgraph_inline_failed_string (e->inline_failed));
1389 continue;
1392 /* Inline the edge and flatten the inline clone. Avoid
1393 recursing through the original node if the node was cloned. */
1394 if (dump_file)
1395 fprintf (dump_file, " Inlining %s into %s.\n",
1396 cgraph_node_name (e->callee),
1397 cgraph_node_name (e->caller));
1398 orig_callee = e->callee;
1399 cgraph_mark_inline_edge (e, true, NULL);
1400 if (e->callee != orig_callee)
1401 orig_callee->aux = (void *)(size_t) INLINE_ALL;
1402 cgraph_flatten (e->callee);
1403 if (e->callee != orig_callee)
1404 orig_callee->aux = NULL;
1407 node->aux = NULL;
1410 /* Decide on the inlining. We do so in topological order to avoid
1411 the expense of updating data structures. */
1413 static unsigned int
1414 cgraph_decide_inlining (void)
1416 struct cgraph_node *node;
1417 int nnodes;
1418 struct cgraph_node **order =
1419 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1420 int old_size = 0;
1421 int i;
1422 int initial_size = 0;
1424 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1425 if (in_lto_p && flag_indirect_inlining)
1426 ipa_update_after_lto_read ();
1427 if (flag_indirect_inlining)
1428 ipa_create_all_structures_for_iinln ();
1430 max_count = 0;
1431 max_benefit = 0;
1432 for (node = cgraph_nodes; node; node = node->next)
1433 if (node->analyzed)
1435 struct cgraph_edge *e;
1437 gcc_assert (inline_summary (node)->self_size == node->global.size);
1438 initial_size += node->global.size;
1439 for (e = node->callees; e; e = e->next_callee)
1440 if (max_count < e->count)
1441 max_count = e->count;
1442 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1443 max_benefit = inline_summary (node)->time_inlining_benefit;
1445 gcc_assert (in_lto_p
1446 || !max_count
1447 || (profile_info && flag_branch_probabilities));
1448 overall_size = initial_size;
1450 nnodes = cgraph_postorder (order);
1452 if (dump_file)
1453 fprintf (dump_file,
1454 "\nDeciding on inlining. Starting with size %i.\n",
1455 initial_size);
1457 for (node = cgraph_nodes; node; node = node->next)
1458 node->aux = 0;
1460 if (dump_file)
1461 fprintf (dump_file, "\nFlattening functions:\n");
1463 /* In the first pass handle functions to be flattened. Do this with
1464 a priority so none of our later choices will make this impossible. */
1465 for (i = nnodes - 1; i >= 0; i--)
1467 node = order[i];
1469 /* Handle nodes to be flattened, but don't update overall unit
1470 size. Calling the incremental inliner here is lame;
1471 a simple worklist should be enough. What should be left
1472 here from the early inliner (if it runs) is cyclic cases.
1473 Ideally when processing callees we would stop inlining at the
1474 entry of cycles, possibly cloning that entry point and
1475 trying to flatten it, turning it into a self-recursive
1476 function. */
1477 if (lookup_attribute ("flatten",
1478 DECL_ATTRIBUTES (node->decl)) != NULL)
1480 if (dump_file)
1481 fprintf (dump_file,
1482 "Flattening %s\n", cgraph_node_name (node));
1483 cgraph_flatten (node);
1487 cgraph_decide_inlining_of_small_functions ();
1489 if (flag_inline_functions_called_once)
1491 if (dump_file)
1492 fprintf (dump_file, "\nDeciding on functions called once:\n");
1494 /* And finally decide what functions are called once. */
1495 for (i = nnodes - 1; i >= 0; i--)
1497 node = order[i];
1499 if (node->callers
1500 && !node->callers->next_caller
1501 && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
1502 && node->local.inlinable
1503 && node->callers->inline_failed
1504 && node->callers->caller != node
1505 && node->callers->caller->global.inlined_to != node
1506 && !node->callers->call_stmt_cannot_inline_p
1507 && !DECL_EXTERNAL (node->decl))
1509 cgraph_inline_failed_t reason;
1510 old_size = overall_size;
1511 if (dump_file)
1513 fprintf (dump_file,
1514 "\nConsidering %s size %i.\n",
1515 cgraph_node_name (node), node->global.size);
1516 fprintf (dump_file,
1517 " Called once from %s %i insns.\n",
1518 cgraph_node_name (node->callers->caller),
1519 node->callers->caller->global.size);
1522 if (cgraph_check_inline_limits (node->callers->caller, node,
1523 &reason, false))
1525 struct cgraph_node *caller = node->callers->caller;
1526 cgraph_mark_inline (node->callers);
1527 if (dump_file)
1528 fprintf (dump_file,
1529 " Inlined into %s which now has %i size"
1530 " for a net change of %+i size.\n",
1531 cgraph_node_name (caller),
1532 caller->global.size,
1533 overall_size - old_size);
1535 else
1537 if (dump_file)
1538 fprintf (dump_file,
1539 " Not inlining: %s.\n",
1540 cgraph_inline_failed_string (reason));
1546 /* Free ipa-prop structures if they are no longer needed. */
1547 if (flag_indirect_inlining)
1548 ipa_free_all_structures_after_iinln ();
1550 if (dump_file)
1551 fprintf (dump_file,
1552 "\nInlined %i calls, eliminated %i functions, "
1553 "size %i turned to %i size.\n\n",
1554 ncalls_inlined, nfunctions_inlined, initial_size,
1555 overall_size);
1556 free (order);
1557 return 0;
1560 /* Return true when N is leaf function. Accept cheap (pure&const) builtins
1561 in leaf functions. */
1562 static bool
1563 leaf_node_p (struct cgraph_node *n)
1565 struct cgraph_edge *e;
1566 for (e = n->callees; e; e = e->next_callee)
1567 if (!DECL_BUILT_IN (e->callee->decl)
1568 || (!TREE_READONLY (e->callee->decl)
1569 || DECL_PURE_P (e->callee->decl)))
1570 return false;
1571 return true;
1574 /* Decide on the inlining. We do so in topological order to avoid
1575 the expense of updating data structures. */
1577 static bool
1578 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1579 enum inlining_mode mode)
1581 struct cgraph_edge *e;
1582 bool inlined = false;
1583 cgraph_inline_failed_t failed_reason;
1585 #ifdef ENABLE_CHECKING
1586 verify_cgraph_node (node);
1587 #endif
1589 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1590 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1592 if (dump_file)
1593 fprintf (dump_file, "Incrementally flattening %s\n",
1594 cgraph_node_name (node));
1595 mode = INLINE_ALL;
1598 /* First of all look for always inline functions. */
1599 if (mode != INLINE_SIZE_NORECURSIVE)
1600 for (e = node->callees; e; e = e->next_callee)
1602 if (!e->callee->local.disregard_inline_limits
1603 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1604 continue;
1605 if (e->call_stmt_cannot_inline_p)
1606 continue;
1607 if (dump_file)
1608 fprintf (dump_file,
1609 "Considering to always inline inline candidate %s.\n",
1610 cgraph_node_name (e->callee));
1611 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1613 if (dump_file)
1614 fprintf (dump_file, "Not inlining: recursive call.\n");
1615 continue;
1617 if (!tree_can_inline_p (e))
1619 if (dump_file)
1620 fprintf (dump_file,
1621 "Not inlining: %s",
1622 cgraph_inline_failed_string (e->inline_failed));
1623 continue;
1625 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1626 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1628 if (dump_file)
1629 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1630 continue;
1632 if (!e->callee->analyzed)
1634 if (dump_file)
1635 fprintf (dump_file,
1636 "Not inlining: Function body no longer available.\n");
1637 continue;
1640 if (dump_file)
1641 fprintf (dump_file, " Inlining %s into %s.\n",
1642 cgraph_node_name (e->callee),
1643 cgraph_node_name (e->caller));
1644 cgraph_mark_inline (e);
1645 inlined = true;
1648 /* Now do the automatic inlining. */
1649 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
1650 /* Never inline regular functions into always-inline functions
1651 during incremental inlining. */
1652 && !node->local.disregard_inline_limits)
1654 bitmap visited = BITMAP_ALLOC (NULL);
1655 for (e = node->callees; e; e = e->next_callee)
1657 int allowed_growth = 0;
1658 if (!e->callee->local.inlinable
1659 || !e->inline_failed
1660 || e->callee->local.disregard_inline_limits)
1661 continue;
1662 /* We are inlining a function to all call-sites in node
1663 or to none. So visit each candidate only once. */
1664 if (!bitmap_set_bit (visited, e->callee->uid))
1665 continue;
1666 if (dump_file)
1667 fprintf (dump_file, "Considering inline candidate %s.\n",
1668 cgraph_node_name (e->callee));
1669 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1671 if (dump_file)
1672 fprintf (dump_file, "Not inlining: recursive call.\n");
1673 continue;
1675 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1676 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1678 if (dump_file)
1679 fprintf (dump_file,
1680 "Not inlining: SSA form does not match.\n");
1681 continue;
1684 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1685 && optimize_function_for_speed_p (cfun))
1686 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1688 /* When the function body would grow and inlining the function
1689 won't eliminate the need for offline copy of the function,
1690 don't inline. */
1691 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1692 || (!flag_inline_functions
1693 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1694 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1695 > e->caller->global.size + allowed_growth)
1696 && cgraph_estimate_growth (e->callee) > allowed_growth)
1698 if (dump_file)
1699 fprintf (dump_file,
1700 "Not inlining: code size would grow by %i.\n",
1701 cgraph_estimate_size_after_inlining (1, e->caller,
1702 e->callee)
1703 - e->caller->global.size);
1704 continue;
1706 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1707 false)
1708 || e->call_stmt_cannot_inline_p)
1710 if (dump_file)
1711 fprintf (dump_file, "Not inlining: %s.\n",
1712 cgraph_inline_failed_string (e->inline_failed));
1713 continue;
1715 if (!e->callee->analyzed)
1717 if (dump_file)
1718 fprintf (dump_file,
1719 "Not inlining: Function body no longer available.\n");
1720 continue;
1722 if (!tree_can_inline_p (e))
1724 if (dump_file)
1725 fprintf (dump_file,
1726 "Not inlining: %s.",
1727 cgraph_inline_failed_string (e->inline_failed));
1728 continue;
1730 if (cgraph_default_inline_p (e->callee, &failed_reason))
1732 if (dump_file)
1733 fprintf (dump_file, " Inlining %s into %s.\n",
1734 cgraph_node_name (e->callee),
1735 cgraph_node_name (e->caller));
1736 cgraph_mark_inline (e);
1737 inlined = true;
1740 BITMAP_FREE (visited);
1742 return inlined;
1745 /* Because inlining might remove no-longer-reachable nodes, we need to
1746 keep the array visible to the garbage collector to avoid reading
1747 collected-out nodes. */
1748 static int nnodes;
1749 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1751 /* Do inlining of small functions. Doing so early helps profiling and other
1752 passes to be somewhat more effective and avoids some code duplication in
1753 later real inlining pass for testcases with very many function calls. */
1754 static unsigned int
1755 cgraph_early_inlining (void)
1757 struct cgraph_node *node = cgraph_node (current_function_decl);
1758 unsigned int todo = 0;
1759 int iterations = 0;
1761 if (seen_error ())
1762 return 0;
1764 if (!optimize
1765 || flag_no_inline
1766 || !flag_early_inlining)
1768 /* When not optimizing or not inlining, inline only always-inline
1769 functions. */
1770 cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
1771 timevar_push (TV_INTEGRATION);
1772 todo |= optimize_inline_calls (current_function_decl);
1773 timevar_pop (TV_INTEGRATION);
1775 else
1777 if (lookup_attribute ("flatten",
1778 DECL_ATTRIBUTES (node->decl)) != NULL)
1780 if (dump_file)
1781 fprintf (dump_file,
1782 "Flattening %s\n", cgraph_node_name (node));
1783 cgraph_flatten (node);
1784 timevar_push (TV_INTEGRATION);
1785 todo |= optimize_inline_calls (current_function_decl);
1786 timevar_pop (TV_INTEGRATION);
1788 /* We iterate incremental inlining to get trivial cases of indirect
1789 inlining. */
1790 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1791 && cgraph_decide_inlining_incrementally (node,
1792 iterations
1793 ? INLINE_SIZE_NORECURSIVE
1794 : INLINE_SIZE))
1796 timevar_push (TV_INTEGRATION);
1797 todo |= optimize_inline_calls (current_function_decl);
1798 iterations++;
1799 timevar_pop (TV_INTEGRATION);
1801 if (dump_file)
1802 fprintf (dump_file, "Iterations: %i\n", iterations);
1805 cfun->always_inline_functions_inlined = true;
1807 return todo;
1810 struct gimple_opt_pass pass_early_inline =
1813 GIMPLE_PASS,
1814 "einline", /* name */
1815 NULL, /* gate */
1816 cgraph_early_inlining, /* execute */
1817 NULL, /* sub */
1818 NULL, /* next */
1819 0, /* static_pass_number */
1820 TV_INLINE_HEURISTICS, /* tv_id */
1821 0, /* properties_required */
1822 0, /* properties_provided */
1823 0, /* properties_destroyed */
1824 0, /* todo_flags_start */
1825 TODO_dump_func /* todo_flags_finish */
1829 /* When inlining shall be performed. */
1830 static bool
1831 cgraph_gate_ipa_early_inlining (void)
1833 return (flag_early_inlining
1834 && !in_lto_p
1835 && (flag_branch_probabilities || flag_test_coverage
1836 || profile_arc_flag));
1839 /* IPA pass wrapper for the early inlining pass. We need to run early inlining
1840 before tree profiling, so we have a stand-alone IPA pass for doing so. */
1841 struct simple_ipa_opt_pass pass_ipa_early_inline =
1844 SIMPLE_IPA_PASS,
1845 "einline_ipa", /* name */
1846 cgraph_gate_ipa_early_inlining, /* gate */
1847 NULL, /* execute */
1848 NULL, /* sub */
1849 NULL, /* next */
1850 0, /* static_pass_number */
1851 TV_INLINE_HEURISTICS, /* tv_id */
1852 0, /* properties_required */
1853 0, /* properties_provided */
1854 0, /* properties_destroyed */
1855 0, /* todo_flags_start */
1856 TODO_dump_cgraph /* todo_flags_finish */
1860 /* See if the statement might disappear after inlining.  We are not terribly
1861 sophisticated; basically we look for simple abstraction penalty wrappers. */
1863 static bool
1864 likely_eliminated_by_inlining_p (gimple stmt)
1866 enum gimple_code code = gimple_code (stmt);
1867 switch (code)
1869 case GIMPLE_RETURN:
1870 return true;
1871 case GIMPLE_ASSIGN:
1872 if (gimple_num_ops (stmt) != 2)
1873 return false;
1875 /* Casts of parameters, loads from parameters passed by reference
1876 and stores to return value or parameters are probably free after
1877 inlining. */
1878 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1879 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1880 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1881 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1883 tree rhs = gimple_assign_rhs1 (stmt);
1884 tree lhs = gimple_assign_lhs (stmt);
1885 tree inner_rhs = rhs;
1886 tree inner_lhs = lhs;
1887 bool rhs_free = false;
1888 bool lhs_free = false;
1890 while (handled_component_p (inner_lhs)
1891 || TREE_CODE (inner_lhs) == MEM_REF)
1892 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1893 while (handled_component_p (inner_rhs)
1894 || TREE_CODE (inner_rhs) == ADDR_EXPR
1895 || TREE_CODE (inner_rhs) == MEM_REF)
1896 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1899 if (TREE_CODE (inner_rhs) == PARM_DECL
1900 || (TREE_CODE (inner_rhs) == SSA_NAME
1901 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1902 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1903 rhs_free = true;
1904 if (rhs_free && is_gimple_reg (lhs))
1905 lhs_free = true;
1906 if (((TREE_CODE (inner_lhs) == PARM_DECL
1907 || (TREE_CODE (inner_lhs) == SSA_NAME
1908 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1909 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1910 && inner_lhs != lhs)
1911 || TREE_CODE (inner_lhs) == RESULT_DECL
1912 || (TREE_CODE (inner_lhs) == SSA_NAME
1913 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1914 lhs_free = true;
1915 if (lhs_free
1916 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1917 rhs_free = true;
1918 if (lhs_free && rhs_free)
1919 return true;
1921 return false;
1922 default:
1923 return false;
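/* Illustrative example only (not taken from a real testcase): in a trivial
   accessor such as

     struct pt { int x; int y; };
     static int get_x (const struct pt *p) { return p->x; }

   the load through the parameter P matches the patterns above and the
   GIMPLE_RETURN is free by definition, so the whole body of get_x is
   counted as likely eliminated once it is inlined into a caller.  */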
1927 /* Compute function body size parameters for NODE. */
1929 static void
1930 estimate_function_body_sizes (struct cgraph_node *node)
1932 gcov_type time = 0;
1933 gcov_type time_inlining_benefit = 0;
1934 int size = 0;
1935 int size_inlining_benefit = 0;
1936 basic_block bb;
1937 gimple_stmt_iterator bsi;
1938 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1939 tree arg;
1940 int freq;
1941 tree funtype = TREE_TYPE (node->decl);
1943 if (dump_file)
1944 fprintf (dump_file, "Analyzing function body size: %s\n",
1945 cgraph_node_name (node));
1947 gcc_assert (my_function && my_function->cfg);
1948 FOR_EACH_BB_FN (bb, my_function)
1950 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1951 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1953 gimple stmt = gsi_stmt (bsi);
1954 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1955 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1957 if (dump_file && (dump_flags & TDF_DETAILS))
1959 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1960 freq, this_size, this_time);
1961 print_gimple_stmt (dump_file, stmt, 0, 0);
1963 this_time *= freq;
1964 time += this_time;
1965 size += this_size;
1966 if (likely_eliminated_by_inlining_p (stmt))
1968 size_inlining_benefit += this_size;
1969 time_inlining_benefit += this_time;
1970 if (dump_file && (dump_flags & TDF_DETAILS))
1971 fprintf (dump_file, " Likely eliminated\n");
1973 gcc_assert (time >= 0);
1974 gcc_assert (size >= 0);
1977 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1978 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1979 / CGRAPH_FREQ_BASE);
1980 if (dump_file)
1981 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1982 (int)time, (int)time_inlining_benefit,
1983 size, size_inlining_benefit);
1984 time_inlining_benefit += eni_time_weights.call_cost;
1985 size_inlining_benefit += eni_size_weights.call_cost;
1986 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1988 int cost = estimate_move_cost (TREE_TYPE (funtype));
1989 time_inlining_benefit += cost;
1990 size_inlining_benefit += cost;
1992 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = DECL_CHAIN (arg))
1993 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1995 int cost = estimate_move_cost (TREE_TYPE (arg));
1996 time_inlining_benefit += cost;
1997 size_inlining_benefit += cost;
1999 if (time_inlining_benefit > MAX_TIME)
2000 time_inlining_benefit = MAX_TIME;
2001 if (time > MAX_TIME)
2002 time = MAX_TIME;
2003 inline_summary (node)->self_time = time;
2004 inline_summary (node)->self_size = size;
2005 if (dump_file)
2006 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
2007 (int)time, (int)time_inlining_benefit,
2008 size, size_inlining_benefit);
2009 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
2010 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
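/* Worked example of the scaling above (the numbers are made up): a statement
   whose estimated time is 2, sitting in a block with frequency
   3 * CGRAPH_FREQ_BASE / 2, contributes 3 * CGRAPH_FREQ_BASE to TIME, and the
   rounding division by CGRAPH_FREQ_BASE turns that into 3 time units; both
   TIME and TIME_INLINING_BENEFIT are then capped at MAX_TIME.  */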
2013 /* Compute parameters of functions used by inliner. */
2014 unsigned int
2015 compute_inline_parameters (struct cgraph_node *node)
2017 HOST_WIDE_INT self_stack_size;
2019 gcc_assert (!node->global.inlined_to);
2021 /* Estimate the stack size for the function. But not at -O0
2022 because estimated_stack_frame_size is a quadratic problem. */
2023 self_stack_size = optimize ? estimated_stack_frame_size (node->decl) : 0;
2024 inline_summary (node)->estimated_self_stack_size = self_stack_size;
2025 node->global.estimated_stack_size = self_stack_size;
2026 node->global.stack_frame_offset = 0;
2028 /* Can this function be inlined at all? */
2029 node->local.inlinable = tree_inlinable_function_p (node->decl);
2030 if (node->local.inlinable && !node->local.disregard_inline_limits)
2031 node->local.disregard_inline_limits
2032 = DECL_DISREGARD_INLINE_LIMITS (node->decl);
2033 estimate_function_body_sizes (node);
2034 /* Inlining characteristics are maintained by cgraph_mark_inline. */
2035 node->global.time = inline_summary (node)->self_time;
2036 node->global.size = inline_summary (node)->self_size;
2037 return 0;
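/* Sketch of how these cached values are meant to be consumed (assumed usage,
   not a verbatim quote of any particular caller): the heuristics read the
   summary instead of re-walking the function body, e.g.

     struct cgraph_node *n = cgraph_node (decl);
     int per_call_size_cost
       = inline_summary (n)->self_size
         - inline_summary (n)->size_inlining_benefit;

   which is roughly the size a call site pays when N is inlined into it;
   DECL stands for whatever FUNCTION_DECL is being asked about.  */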
2041 /* Compute parameters of functions used by inliner using
2042 current_function_decl. */
2043 static unsigned int
2044 compute_inline_parameters_for_current (void)
2046 compute_inline_parameters (cgraph_node (current_function_decl));
2047 return 0;
2050 struct gimple_opt_pass pass_inline_parameters =
2053 GIMPLE_PASS,
2054 "inline_param", /* name */
2055 NULL, /* gate */
2056 compute_inline_parameters_for_current,/* execute */
2057 NULL, /* sub */
2058 NULL, /* next */
2059 0, /* static_pass_number */
2060 TV_INLINE_HEURISTICS, /* tv_id */
2061 0, /* properties_required */
2062 0, /* properties_provided */
2063 0, /* properties_destroyed */
2064 0, /* todo_flags_start */
2065 0 /* todo_flags_finish */
2069 /* Perform the intraprocedural analysis of NODE that is required to
2070 inline indirect calls. */
2071 static void
2072 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
2074 ipa_analyze_node (node);
2075 if (dump_file && (dump_flags & TDF_DETAILS))
2077 ipa_print_node_params (dump_file, node);
2078 ipa_print_node_jump_functions (dump_file, node);
2082 /* Analyze the body of NODE and compute the parameters used by the inliner. */
2083 static void
2084 analyze_function (struct cgraph_node *node)
2086 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2087 current_function_decl = node->decl;
2089 compute_inline_parameters (node);
2090 /* FIXME: We should remove the optimize check after we ensure we never run
2091 IPA passes when not optimizing. */
2092 if (flag_indirect_inlining && optimize)
2093 inline_indirect_intraprocedural_analysis (node);
2095 current_function_decl = NULL;
2096 pop_cfun ();
2099 /* Called when a new function is inserted into the callgraph late. */
2100 static void
2101 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
2103 analyze_function (node);
2106 /* Generate the inline summaries for all analyzed functions in the callgraph. */
2107 static void
2108 inline_generate_summary (void)
2110 struct cgraph_node *node;
2112 function_insertion_hook_holder =
2113 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2115 if (flag_indirect_inlining)
2117 ipa_register_cgraph_hooks ();
2118 ipa_check_create_node_params ();
2119 ipa_check_create_edge_args ();
2122 for (node = cgraph_nodes; node; node = node->next)
2123 if (node->analyzed)
2124 analyze_function (node);
2126 return;
2129 /* Apply inline plan to function. */
2130 static unsigned int
2131 inline_transform (struct cgraph_node *node)
2133 unsigned int todo = 0;
2134 struct cgraph_edge *e;
2135 bool inline_p = false;
2137 /* FIXME: Currently the pass manager is adding the inline transform more than
2138 once to some clones.  This needs revisiting after WPA cleanups. */
2139 if (cfun->after_inlining)
2140 return 0;
2142 /* We might need the body of this function so that we can expand
2143 it inline somewhere else. */
2144 if (cgraph_preserve_function_body_p (node->decl))
2145 save_inline_function_body (node);
2147 for (e = node->callees; e; e = e->next_callee)
2149 cgraph_redirect_edge_call_stmt_to_callee (e);
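      /* Run the tree inliner not only when some edge was actually inlined,
         but also when -Winline is in effect, so that diagnostics about calls
         that could not be inlined are emitted from there.  */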
2150 if (!e->inline_failed || warn_inline)
2151 inline_p = true;
2154 if (inline_p)
2156 timevar_push (TV_INTEGRATION);
2157 todo = optimize_inline_calls (current_function_decl);
2158 timevar_pop (TV_INTEGRATION);
2160 cfun->always_inline_functions_inlined = true;
2161 cfun->after_inlining = true;
2162 return todo | execute_fixup_cfg ();
2165 /* Read the inline summary.  Jump functions are shared between ipa-cp
2166 and the inliner, so when ipa-cp is active we do not need to stream
2167 them a second time here. */
2169 static void
2170 inline_read_summary (void)
2172 if (flag_indirect_inlining)
2174 ipa_register_cgraph_hooks ();
2175 if (!flag_ipa_cp)
2176 ipa_prop_read_jump_functions ();
2178 function_insertion_hook_holder =
2179 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2182 /* Write the inline summary for the nodes in SET.
2183 Jump functions are shared between ipa-cp and the inliner, so when ipa-cp
2184 is active we do not need to write them twice. */
2186 static void
2187 inline_write_summary (cgraph_node_set set,
2188 varpool_node_set vset ATTRIBUTE_UNUSED)
2190 if (flag_indirect_inlining && !flag_ipa_cp)
2191 ipa_prop_write_jump_functions (set);
2194 /* When to run IPA inlining. Inlining of always-inline functions
2195 happens during early inlining. */
2197 static bool
2198 gate_cgraph_decide_inlining (void)
2200 /* ??? We'd like to skip this if not optimizing or not inlining as
2201 all always-inline functions have been processed by early
2202 inlining already. But this at least breaks EH with C++ as
2203 we need to unconditionally run fixup_cfg even at -O0.
2204 So leave it on unconditionally for now. */
2205 return 1;
2208 struct ipa_opt_pass_d pass_ipa_inline =
2211 IPA_PASS,
2212 "inline", /* name */
2213 gate_cgraph_decide_inlining, /* gate */
2214 cgraph_decide_inlining, /* execute */
2215 NULL, /* sub */
2216 NULL, /* next */
2217 0, /* static_pass_number */
2218 TV_INLINE_HEURISTICS, /* tv_id */
2219 0, /* properties_required */
2220 0, /* properties_provided */
2221 0, /* properties_destroyed */
2222 TODO_remove_functions, /* todo_flags_start */
2223 TODO_dump_cgraph | TODO_dump_func
2224 | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
2226 inline_generate_summary, /* generate_summary */
2227 inline_write_summary, /* write_summary */
2228 inline_read_summary, /* read_summary */
2229 NULL, /* write_optimization_summary */
2230 NULL, /* read_optimization_summary */
2231 NULL, /* stmt_fixup */
2232 0, /* TODOs */
2233 inline_transform, /* function_transform */
2234 NULL, /* variable_transform */
2238 #include "gt-ipa-inline.h"