gcc/ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Inlining decision heuristics
24 We separate inlining decisions from the inliner itself and store them
25 inside the callgraph as a so-called inline plan. Refer to the cgraph.c
26 documentation for the particular representation of inline plans in the
27 callgraph.
29 There are three major parts of this file:
31 cgraph_mark_inline implementation
33 This function marks a given call inline and performs the necessary
34 modifications of the cgraph (production of the clones and updating overall
35 statistics).
37 inlining heuristics limits
39 These functions check that a particular inlining is allowed
40 by the limits specified by the user (allowed function growth, overall unit
41 growth and so on).
43 inlining heuristics
45 This is the implementation of the IPA pass that aims to get as much benefit
46 from inlining as possible while obeying the limits checked above.
48 The implementation of the particular heuristics is separated from
49 the rest of the code to make it easier to replace it with a more complicated
50 implementation in the future. The rest of the inlining code acts as a
51 library aimed at modifying the callgraph and verifying that the parameters
52 on code size growth fit.
54 To mark a given call inline, use the cgraph_mark_inline function; the
55 verification is performed by cgraph_default_inline_p and
56 cgraph_check_inline_limits.
58 The heuristics implement a simple knapsack-style algorithm, ordering
59 all functions by their "profitability" (estimated by code size growth)
60 and inlining them in priority order.
62 cgraph_decide_inlining implements the heuristics taking the whole callgraph
63 into account, while cgraph_decide_inlining_incrementally considers
64 only one function at a time and is used by the early inliner.
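As a rough, simplified sketch (not the exact code, which also handles
recursive inlining, indirect edges and heap-key updates), the greedy loop
in cgraph_decide_inlining_of_small_functions below looks like:

   put every inlinable call edge into a fibheap keyed by cgraph_edge_badness;
   while (!fibheap_empty (heap) && overall_size <= max_size)
     {
       edge = fibheap_extract_min (heap);
       if (the badness of edge has changed)
         re-insert edge with the new key and continue;
       if (the growth, stack and recursion limits allow it)
         cgraph_mark_inline_edge (edge) and update keys of affected edges;
     }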
66 The inliner itself is split into several passes:
68 pass_inline_parameters
70 This pass computes local properties of functions that are used by the inliner:
71 the estimated function body size, whether the function is inlinable at all, and
72 its stack frame consumption.
74 Before executing any of the inliner passes, this local pass has to be applied
75 to each function in the callgraph (i.e. run as a subpass of some earlier
76 IPA pass). The results are made out of date by any optimization applied
77 to the function body.
79 pass_early_inlining
81 A simple local inlining pass that inlines callees into the current function. This
82 pass does no global whole-compilation-unit analysis, so when it is allowed
83 to do inlining that expands code size it might result in unbounded growth of the
84 whole unit.
86 The pass is run during conversion into SSA form. Only functions already
87 converted into SSA form are inlined, so the conversion must happen in
88 topological order on the callgraph (which is maintained by the pass manager).
89 Functions are early-optimized after inlining, so the early inliner sees the
90 unoptimized function itself, but all considered callees are already
91 optimized, allowing it to unfold the C++ abstraction penalty effectively and
92 cheaply.
94 pass_ipa_early_inlining
96 With profiling, the early inlining is also necessary to reduce
97 instrumentation costs on programs with a high abstraction penalty (doing
98 many redundant calls). This can't happen in parallel with early
99 optimization and profile instrumentation, because we would end up
100 re-instrumenting already instrumented function bodies we brought in via
101 inlining.
103 To avoid this, this pass is executed as an IPA pass before profiling. It is
104 a simple wrapper around pass_early_inlining and ensures that the first inlining happens before the instrumentation.
106 pass_ipa_inline
108 This is the main pass, implementing a simple greedy algorithm to do inlining
109 of small functions (which may result in overall growth of the compilation unit) and
110 inlining of functions called once. The pass computes just the so-called inline
111 plan (the representation of the inlining to be done in the callgraph) and, unlike early
112 inlining, it does not perform the inlining itself.
114 pass_apply_inline
116 This pass performs the actual inlining according to pass_ipa_inline on a given
117 function. Possibly the function body before inlining is saved when it is
118 needed for further inlining later. */
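/* For orientation, a minimal (hypothetical) translation unit exercising the
   attributes that drive the heuristics below; roughly speaking, "flatten"
   triggers cgraph_flatten and "always_inline" sets disregard_inline_limits:

     static inline int __attribute__ ((always_inline))
     add1 (int x)
     {
       return x + 1;
     }

     int __attribute__ ((flatten))
     caller (int x)
     {
       return add1 (x) + add1 (x);
     }
*/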
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "tm.h"
125 #include "tree.h"
126 #include "tree-inline.h"
127 #include "langhooks.h"
128 #include "flags.h"
129 #include "cgraph.h"
130 #include "diagnostic.h"
131 #include "gimple-pretty-print.h"
132 #include "timevar.h"
133 #include "params.h"
134 #include "fibheap.h"
135 #include "intl.h"
136 #include "tree-pass.h"
137 #include "hashtab.h"
138 #include "coverage.h"
139 #include "ggc.h"
140 #include "tree-flow.h"
141 #include "rtl.h"
142 #include "ipa-prop.h"
143 #include "except.h"
145 #define MAX_TIME 1000000000
147 /* Modes the incremental inliner operates in:
149 In ALWAYS_INLINE mode only functions marked
150 always_inline are inlined. This mode is used after detecting a cycle during
151 flattening.
153 In SIZE mode, only functions that reduce the function body size after inlining
154 are inlined; this is used during early inlining.
156 In ALL mode, everything is inlined. This is used during flattening. */
157 enum inlining_mode {
158 INLINE_NONE = 0,
159 INLINE_ALWAYS_INLINE,
160 INLINE_SIZE_NORECURSIVE,
161 INLINE_SIZE,
162 INLINE_ALL
165 static bool
166 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode);
167 static void cgraph_flatten (struct cgraph_node *node);
170 /* Statistics we collect about inlining algorithm. */
171 static int ncalls_inlined;
172 static int nfunctions_inlined;
173 static int overall_size;
174 static gcov_type max_count, max_benefit;
176 /* Holders of ipa cgraph hooks: */
177 static struct cgraph_node_hook_list *function_insertion_hook_holder;
179 static inline struct inline_summary *
180 inline_summary (struct cgraph_node *node)
182 return &node->local.inline_summary;
185 /* Estimate self time of the function after inlining WHAT into TO. */
187 static int
188 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
189 struct cgraph_node *what)
191 gcov_type time = (((gcov_type)what->global.time
192 - inline_summary (what)->time_inlining_benefit)
193 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
194 + to->global.time;
195 if (time < 0)
196 time = 0;
197 if (time > MAX_TIME)
198 time = MAX_TIME;
199 return time;
202 /* Estimate self size of the function after inlining WHAT into TO. */
204 static inline int
205 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
206 struct cgraph_node *what)
208 int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
209 * times + to->global.size);
210 gcc_assert (size >= 0);
211 return size;
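/* Worked example with made-up numbers: if WHAT has global.size 30 and a
   size_inlining_benefit of 10, inlining it TIMES = 2 into a TO of size 50
   gives (30 - 10) * 2 + 50 = 90.  Similarly for the time estimate above,
   with CGRAPH_FREQ_BASE scaling: WHAT time 1000, benefit 200, frequency
   CGRAPH_FREQ_BASE / 2 and TO time 500 give roughly 800 / 2 + 500 = 900,
   clamped to [0, MAX_TIME].  */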
214 /* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
215 by NEST. */
217 static void
218 update_noncloned_frequencies (struct cgraph_node *node,
219 int freq_scale, int nest)
221 struct cgraph_edge *e;
223 /* We do not want to ignore high loop nest after freq drops to 0. */
224 if (!freq_scale)
225 freq_scale = 1;
226 for (e = node->callees; e; e = e->next_callee)
228 e->loop_nest += nest;
229 e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
230 if (e->frequency > CGRAPH_FREQ_MAX)
231 e->frequency = CGRAPH_FREQ_MAX;
232 if (!e->inline_failed)
233 update_noncloned_frequencies (e->callee, freq_scale, nest);
237 /* E is expected to be an edge being inlined. Clone the destination node of
238 the edge and redirect it to the new clone.
239 DUPLICATE is used for bookkeeping on whether we are actually creating new
240 clones or re-using a node originally representing an out-of-line function call. */
242 void
243 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
244 bool update_original)
246 HOST_WIDE_INT peak;
248 if (duplicate)
250 /* We may eliminate the need for out-of-line copy to be output.
251 In that case just go ahead and re-use it. */
252 if (!e->callee->callers->next_caller
253 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
254 /* Don't reuse if more than one function shares a comdat group.
255 If the other function(s) are needed, we need to emit even
256 this function out of line. */
257 && !e->callee->same_comdat_group
258 && !cgraph_new_nodes)
260 gcc_assert (!e->callee->global.inlined_to);
261 if (e->callee->analyzed)
263 overall_size -= e->callee->global.size;
264 nfunctions_inlined++;
266 duplicate = false;
267 e->callee->local.externally_visible = false;
268 update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
270 else
272 struct cgraph_node *n;
273 n = cgraph_clone_node (e->callee, e->callee->decl,
274 e->count, e->frequency, e->loop_nest,
275 update_original, NULL);
276 cgraph_redirect_edge_callee (e, n);
280 if (e->caller->global.inlined_to)
281 e->callee->global.inlined_to = e->caller->global.inlined_to;
282 else
283 e->callee->global.inlined_to = e->caller;
284 e->callee->global.stack_frame_offset
285 = e->caller->global.stack_frame_offset
286 + inline_summary (e->caller)->estimated_self_stack_size;
287 peak = e->callee->global.stack_frame_offset
288 + inline_summary (e->callee)->estimated_self_stack_size;
289 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
290 e->callee->global.inlined_to->global.estimated_stack_size = peak;
291 cgraph_propagate_frequency (e->callee);
293 /* Recursively clone all bodies. */
294 for (e = e->callee->callees; e; e = e->next_callee)
295 if (!e->inline_failed)
296 cgraph_clone_inlined_nodes (e, duplicate, update_original);
299 /* Mark edge E as inlined and update the callgraph accordingly. UPDATE_ORIGINAL
300 specifies whether the profile of the original function should be updated. If any new
301 indirect edges are discovered in the process, add them to NEW_EDGES, unless
302 it is NULL. Return true iff any new callgraph edges were discovered as a
303 result of inlining. */
305 static bool
306 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
307 VEC (cgraph_edge_p, heap) **new_edges)
309 int old_size = 0, new_size = 0;
310 struct cgraph_node *to = NULL, *what;
311 struct cgraph_edge *curr = e;
312 int freq;
314 gcc_assert (e->inline_failed);
315 e->inline_failed = CIF_OK;
316 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
318 cgraph_clone_inlined_nodes (e, true, update_original);
320 what = e->callee;
322 freq = e->frequency;
323 /* Now update size of caller and all functions caller is inlined into. */
324 for (;e && !e->inline_failed; e = e->caller->callers)
326 to = e->caller;
327 old_size = e->caller->global.size;
328 new_size = cgraph_estimate_size_after_inlining (1, to, what);
329 to->global.size = new_size;
330 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
332 gcc_assert (what->global.inlined_to == to);
333 if (new_size > old_size)
334 overall_size += new_size - old_size;
335 ncalls_inlined++;
337 /* FIXME: We should remove the optimize check after we ensure we never run
338 IPA passes when not optimizing. */
339 if (flag_indirect_inlining && optimize)
340 return ipa_propagate_indirect_call_infos (curr, new_edges);
341 else
342 return false;
345 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER. */
347 static void
348 cgraph_mark_inline (struct cgraph_edge *edge)
350 struct cgraph_node *to = edge->caller;
351 struct cgraph_node *what = edge->callee;
352 struct cgraph_edge *e, *next;
354 gcc_assert (!edge->call_stmt_cannot_inline_p);
355 /* Look for all calls, mark them inline and clone recursively
356 all inlined functions. */
357 for (e = what->callers; e; e = next)
359 next = e->next_caller;
360 if (e->caller == to && e->inline_failed)
362 cgraph_mark_inline_edge (e, true, NULL);
363 if (e == edge)
364 edge = next;
369 /* Estimate the growth caused by inlining NODE into all callees. */
371 static int
372 cgraph_estimate_growth (struct cgraph_node *node)
374 int growth = 0;
375 struct cgraph_edge *e;
376 bool self_recursive = false;
378 if (node->global.estimated_growth != INT_MIN)
379 return node->global.estimated_growth;
381 for (e = node->callers; e; e = e->next_caller)
383 if (e->caller == node)
384 self_recursive = true;
385 if (e->inline_failed)
386 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
387 - e->caller->global.size);
390 /* ??? Wrong for non-trivially self-recursive functions or cases where
391 we decide not to inline for different reasons, but it is not a big deal,
392 as in that case we will keep the body around, but we will also avoid
393 some inlining. */
394 if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
395 && !DECL_EXTERNAL (node->decl) && !self_recursive)
396 growth -= node->global.size;
398 node->global.estimated_growth = growth;
399 return growth;
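/* Worked example with made-up numbers: a NODE of size 20 and
   size_inlining_benefit 5 called from two callers contributes (20 - 5)
   per caller, i.e. growth = 30; if the offline copy can be removed
   afterwards, growth drops by another 20 to 10.  */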
402 /* Return false when inlining WHAT into TO is not a good idea,
403 as it would cause too large a growth of function bodies.
404 When ONE_ONLY is true, assume that only one call site is going
405 to be inlined, otherwise figure out how many call sites in
406 TO call WHAT and verify that all can be inlined. */
409 static bool
410 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
411 cgraph_inline_failed_t *reason, bool one_only)
413 int times = 0;
414 struct cgraph_edge *e;
415 int newsize;
416 int limit;
417 HOST_WIDE_INT stack_size_limit, inlined_stack;
419 if (one_only)
420 times = 1;
421 else
422 for (e = to->callees; e; e = e->next_callee)
423 if (e->callee == what)
424 times++;
426 if (to->global.inlined_to)
427 to = to->global.inlined_to;
429 /* When inlining large function body called once into small function,
430 take the inlined function as base for limiting the growth. */
431 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
432 limit = inline_summary (to)->self_size;
433 else
434 limit = inline_summary (what)->self_size;
436 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
438 /* Check the size after inlining against the function limits. But allow
439 the function to shrink if it went over the limits by forced inlining. */
440 newsize = cgraph_estimate_size_after_inlining (times, to, what);
441 if (newsize >= to->global.size
442 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
443 && newsize > limit)
445 if (reason)
446 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
447 return false;
450 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
452 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
454 inlined_stack = (to->global.stack_frame_offset
455 + inline_summary (to)->estimated_self_stack_size
456 + what->global.estimated_stack_size);
457 if (inlined_stack > stack_size_limit
458 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
460 if (reason)
461 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
462 return false;
464 return true;
467 /* Return true when function N is small enough to be inlined. */
469 static bool
470 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
472 tree decl = n->decl;
474 if (n->local.disregard_inline_limits)
475 return true;
477 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
479 if (reason)
480 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
481 return false;
483 if (!n->analyzed)
485 if (reason)
486 *reason = CIF_BODY_NOT_AVAILABLE;
487 return false;
489 if (cgraph_function_body_availability (n) <= AVAIL_OVERWRITABLE)
491 if (reason)
492 *reason = CIF_OVERWRITABLE;
493 return false;
497 if (DECL_DECLARED_INLINE_P (decl))
499 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
501 if (reason)
502 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
503 return false;
506 else
508 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
510 if (reason)
511 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
512 return false;
516 return true;
519 /* Return true when inlining WHAT would create recursive inlining.
520 We call recursive inlining all cases where the same function appears more than
521 once in a single recursion nest path in the inline graph. */
523 static inline bool
524 cgraph_recursive_inlining_p (struct cgraph_node *to,
525 struct cgraph_node *what,
526 cgraph_inline_failed_t *reason)
528 bool recursive;
529 if (to->global.inlined_to)
530 recursive = what->decl == to->global.inlined_to->decl;
531 else
532 recursive = what->decl == to->decl;
533 /* Marking recursive function inline has sane semantic and thus we should
534 not warn on it. */
535 if (recursive && reason)
536 *reason = (what->local.disregard_inline_limits
537 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
538 return recursive;
541 /* A cost model driving the inlining heuristics in a way such that the edges with
542 the smallest badness are inlined first. After each inlining is performed,
543 the costs of all caller edges of the affected nodes are recomputed so that the
544 metrics may accurately depend on values such as the number of inlinable callers
545 of the function or the function body size. */
547 static int
548 cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
550 gcov_type badness;
551 int growth =
552 (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
553 - edge->caller->global.size);
555 if (edge->callee->local.disregard_inline_limits)
556 return INT_MIN;
558 if (dump)
560 fprintf (dump_file, " Badness calculation for %s -> %s\n",
561 cgraph_node_name (edge->caller),
562 cgraph_node_name (edge->callee));
563 fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
564 growth,
565 edge->callee->global.time,
566 inline_summary (edge->callee)->time_inlining_benefit,
567 edge->callee->global.size,
568 inline_summary (edge->callee)->size_inlining_benefit);
571 /* Always prefer inlining saving code size. */
572 if (growth <= 0)
574 badness = INT_MIN - growth;
575 if (dump)
576 fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
577 growth);
580 /* When profiling is available, base priorities on -(#calls / growth),
581 so we optimize for the overall number of "executed" inlined calls. */
582 else if (max_count)
584 badness =
585 ((int)
586 ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
587 (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
588 if (dump)
590 fprintf (dump_file,
591 " %i (relative %f): profile info. Relative count %f"
592 " * Relative benefit %f\n",
593 (int) badness, (double) badness / INT_MIN,
594 (double) edge->count / max_count,
595 (double) (inline_summary (edge->callee)->
596 time_inlining_benefit + 1) / (max_benefit + 1));
600 /* When a function-local profile is available, base priorities on
601 growth / frequency, so we optimize for the overall frequency of inlined
602 calls. This is not too accurate, since while the call might be frequent
603 within the function, the function itself may be infrequent.
605 Another objective to optimize for is the number of different calls inlined.
606 We add the estimated growth after inlining all functions to bias the
607 priorities slightly in this direction (so less frequently called functions
608 of the same size get priority). */
609 else if (flag_guess_branch_prob)
611 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
612 int benefitperc;
613 int growth_for_all;
614 badness = growth * 10000;
615 benefitperc =
616 MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
617 (edge->callee->global.time + 1) +1, 100);
618 div *= benefitperc;
621 /* Decrease badness if call is nested. */
622 /* Compress the range so we don't overflow. */
623 if (div > 10000)
624 div = 10000 + ceil_log2 (div) - 8;
625 if (div < 1)
626 div = 1;
627 if (badness > 0)
628 badness /= div;
629 growth_for_all = cgraph_estimate_growth (edge->callee);
630 badness += growth_for_all;
631 if (badness > INT_MAX)
632 badness = INT_MAX;
633 if (dump)
635 fprintf (dump_file,
636 " %i: guessed profile. frequency %i, overall growth %i,"
637 " benefit %i%%, divisor %i\n",
638 (int) badness, edge->frequency, growth_for_all, benefitperc, div);
641 /* When the function-local profile is not available or does not give
642 useful information (i.e. the frequency is zero), base the cost on
643 loop nest and overall size growth, so we optimize for the overall number
644 of functions fully inlined in the program. */
645 else
647 int nest = MIN (edge->loop_nest, 8);
648 badness = cgraph_estimate_growth (edge->callee) * 256;
650 /* Decrease badness if call is nested. */
651 if (badness > 0)
652 badness >>= nest;
653 else
655 badness <<= nest;
657 if (dump)
658 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
659 nest);
662 /* Ensure that we did not overflow in all the fixed point math above. */
663 gcc_assert (badness >= INT_MIN);
664 gcc_assert (badness <= INT_MAX - 1);
665 /* Make recursive inlining happen always after other inlining is done. */
666 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
667 return badness + 1;
668 else
669 return badness;
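/* Worked example with made-up numbers for the guessed-profile branch above:
   growth 20, a frequency equal to CGRAPH_FREQ_BASE and a 50% benefit give
   div = (100 + 1) * 50 = 5050 and badness = 20 * 10000 / 5050 = 39, to which
   the estimated overall growth of the callee is then added.  */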
672 /* Recompute badness of EDGE and update its key in HEAP if needed. */
673 static void
674 update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
676 int badness = cgraph_edge_badness (edge, false);
677 if (edge->aux)
679 fibnode_t n = (fibnode_t) edge->aux;
680 gcc_checking_assert (n->data == edge);
682 /* fibheap_replace_key only decreases the keys.
683 When we increase the key we do not update the heap
684 and instead re-insert the element once it becomes
685 a minimum of the heap. */
686 if (badness < n->key)
688 fibheap_replace_key (heap, n, badness);
689 gcc_checking_assert (n->key == badness);
692 else
693 edge->aux = fibheap_insert (heap, badness, edge);
698 /* Recompute heap nodes for each caller edge. */
698 static void
699 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
700 bitmap updated_nodes)
702 struct cgraph_edge *edge;
703 cgraph_inline_failed_t failed_reason;
705 if (!node->local.inlinable
706 || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
707 || node->global.inlined_to)
708 return;
709 if (!bitmap_set_bit (updated_nodes, node->uid))
710 return;
711 node->global.estimated_growth = INT_MIN;
713 /* See if there is something to do. */
714 for (edge = node->callers; edge; edge = edge->next_caller)
715 if (edge->inline_failed)
716 break;
717 if (!edge)
718 return;
719 /* Prune out edges we won't inline into anymore. */
720 if (!cgraph_default_inline_p (node, &failed_reason))
722 for (; edge; edge = edge->next_caller)
723 if (edge->aux)
725 fibheap_delete_node (heap, (fibnode_t) edge->aux);
726 edge->aux = NULL;
727 if (edge->inline_failed)
728 edge->inline_failed = failed_reason;
730 return;
733 for (; edge; edge = edge->next_caller)
734 if (edge->inline_failed)
735 update_edge_key (heap, edge);
738 /* Recompute heap nodes for each uninlined call.
739 This is used when we know that edge badnesses are only going to increase
740 (we introduced a new call site) and thus all we need to do is insert the newly
741 created edges into the heap. */
743 static void
744 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
745 bitmap updated_nodes)
747 struct cgraph_edge *e = node->callees;
748 node->global.estimated_growth = INT_MIN;
750 if (!e)
751 return;
752 while (true)
753 if (!e->inline_failed && e->callee->callees)
754 e = e->callee->callees;
755 else
757 if (e->inline_failed
758 && e->callee->local.inlinable
759 && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
760 && !bitmap_bit_p (updated_nodes, e->callee->uid))
762 node->global.estimated_growth = INT_MIN;
763 /* If function becomes uninlinable, we need to remove it from the heap. */
764 if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
765 update_caller_keys (heap, e->callee, updated_nodes);
766 else
767 /* Otherwise update just edge E. */
768 update_edge_key (heap, e);
770 if (e->next_callee)
771 e = e->next_callee;
772 else
776 if (e->caller == node)
777 return;
778 e = e->caller->callers;
780 while (!e->next_callee);
781 e = e->next_callee;
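/* Note on the loop shape above (and in update_all_callee_keys below): it is
   an iterative depth-first walk over NODE's callee edges, including those of
   its inline clones; we descend through already-inlined callees via
   e->callee->callees and climb back up through e->caller->callers (inline
   clones have exactly one caller) until we are back at NODE.  */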
786 /* Recompute heap nodes for each caller edge of each callee.
787 Walk recursively into all inline clones. */
789 static void
790 update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
791 bitmap updated_nodes)
793 struct cgraph_edge *e = node->callees;
794 node->global.estimated_growth = INT_MIN;
796 if (!e)
797 return;
798 while (true)
799 if (!e->inline_failed && e->callee->callees)
800 e = e->callee->callees;
801 else
803 if (e->inline_failed)
804 update_caller_keys (heap, e->callee, updated_nodes);
805 if (e->next_callee)
806 e = e->next_callee;
807 else
811 if (e->caller == node)
812 return;
813 e = e->caller->callers;
815 while (!e->next_callee);
816 e = e->next_callee;
821 /* Enqueue all recursive calls from NODE into priority queue depending on
822 how likely we want to recursively inline the call. */
824 static void
825 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
826 fibheap_t heap)
828 static int priority;
829 struct cgraph_edge *e;
830 for (e = where->callees; e; e = e->next_callee)
831 if (e->callee == node)
833 /* When profile feedback is available, prioritize by expected number
834 of calls. Without profile feedback we maintain simple queue
835 to order candidates via recursive depths. */
836 fibheap_insert (heap,
837 !max_count ? priority++
838 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
841 for (e = where->callees; e; e = e->next_callee)
842 if (!e->inline_failed)
843 lookup_recursive_calls (node, e->callee, heap);
846 /* Decide on recursive inlining: in case the function has recursive calls,
847 inline until the body size reaches the given limit. If any new indirect edges
848 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
849 is NULL. */
851 static bool
852 cgraph_decide_recursive_inlining (struct cgraph_node *node,
853 VEC (cgraph_edge_p, heap) **new_edges)
855 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
856 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
857 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
858 fibheap_t heap;
859 struct cgraph_edge *e;
860 struct cgraph_node *master_clone, *next;
861 int depth = 0;
862 int n = 0;
864 /* It does not make sense to recursively inline always-inline functions
865 as we are going to sorry() on the remaining calls anyway. */
866 if (node->local.disregard_inline_limits
867 && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
868 return false;
870 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
871 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
872 return false;
874 if (DECL_DECLARED_INLINE_P (node->decl))
876 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
877 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
880 /* Make sure that function is small enough to be considered for inlining. */
881 if (!max_depth
882 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
883 return false;
884 heap = fibheap_new ();
885 lookup_recursive_calls (node, node, heap);
886 if (fibheap_empty (heap))
888 fibheap_delete (heap);
889 return false;
892 if (dump_file)
893 fprintf (dump_file,
894 " Performing recursive inlining on %s\n",
895 cgraph_node_name (node));
897 /* We need original clone to copy around. */
898 master_clone = cgraph_clone_node (node, node->decl,
899 node->count, CGRAPH_FREQ_BASE, 1,
900 false, NULL);
901 master_clone->needed = true;
902 for (e = master_clone->callees; e; e = e->next_callee)
903 if (!e->inline_failed)
904 cgraph_clone_inlined_nodes (e, true, false);
906 /* Do the inlining and update the list of recursive calls during the process. */
907 while (!fibheap_empty (heap)
908 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
909 <= limit))
911 struct cgraph_edge *curr
912 = (struct cgraph_edge *) fibheap_extract_min (heap);
913 struct cgraph_node *cnode;
915 depth = 1;
916 for (cnode = curr->caller;
917 cnode->global.inlined_to; cnode = cnode->callers->caller)
918 if (node->decl == curr->callee->decl)
919 depth++;
920 if (depth > max_depth)
922 if (dump_file)
923 fprintf (dump_file,
924 " maximal depth reached\n");
925 continue;
928 if (max_count)
930 if (!cgraph_maybe_hot_edge_p (curr))
932 if (dump_file)
933 fprintf (dump_file, " Not inlining cold call\n");
934 continue;
936 if (curr->count * 100 / node->count < probability)
938 if (dump_file)
939 fprintf (dump_file,
940 " Probability of edge is too small\n");
941 continue;
945 if (dump_file)
947 fprintf (dump_file,
948 " Inlining call of depth %i", depth);
949 if (node->count)
951 fprintf (dump_file, " called approx. %.2f times per call",
952 (double)curr->count / node->count);
954 fprintf (dump_file, "\n");
956 cgraph_redirect_edge_callee (curr, master_clone);
957 cgraph_mark_inline_edge (curr, false, new_edges);
958 lookup_recursive_calls (node, curr->callee, heap);
959 n++;
961 if (!fibheap_empty (heap) && dump_file)
962 fprintf (dump_file, " Recursive inlining growth limit met.\n");
964 fibheap_delete (heap);
965 if (dump_file)
966 fprintf (dump_file,
967 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
968 master_clone->global.size, node->global.size,
969 master_clone->global.time, node->global.time);
971 /* Remove the master clone we used for inlining. We rely on clones inlined
972 into the master clone being queued just before the master clone so we don't
973 need recursion. */
974 for (node = cgraph_nodes; node != master_clone;
975 node = next)
977 next = node->next;
978 if (node->global.inlined_to == master_clone)
979 cgraph_remove_node (node);
981 cgraph_remove_node (master_clone);
982 /* FIXME: Recursive inlining actually reduces the number of calls to the
983 function. At this place we should probably walk the function and
984 inline clones and compensate the counts accordingly. This probably
985 doesn't matter much in practice. */
986 return n > 0;
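/* In outline, what cgraph_decide_recursive_inlining does above: it keeps a
   master clone of NODE to copy bodies from, keeps a fibheap of recursive
   call sites (ordered by profile count when available, otherwise by discovery
   order, which approximates depth), and repeatedly inlines the best call site
   until the size limit, the depth limit, or a probability threshold stops it.  */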
989 /* Set inline_failed for all callers of given function to REASON. */
991 static void
992 cgraph_set_inline_failed (struct cgraph_node *node,
993 cgraph_inline_failed_t reason)
995 struct cgraph_edge *e;
997 if (dump_file)
998 fprintf (dump_file, "Inlining failed: %s\n",
999 cgraph_inline_failed_string (reason));
1000 for (e = node->callers; e; e = e->next_caller)
1001 if (e->inline_failed)
1002 e->inline_failed = reason;
1005 /* Given whole compilation unit estimate of INSNS, compute how large we can
1006 allow the unit to grow. */
1007 static int
1008 compute_max_insns (int insns)
1010 int max_insns = insns;
1011 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1012 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1014 return ((HOST_WIDEST_INT) max_insns
1015 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
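/* Worked example with made-up numbers: with --param inline-unit-growth=30 and
   an initial unit size of 10000 instructions, the unit may grow up to
   10000 * 130 / 100 = 13000; very small units are first rounded up to
   PARAM_LARGE_UNIT_INSNS so that tiny programs still get some growth headroom.  */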
1018 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1019 static void
1020 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
1022 while (VEC_length (cgraph_edge_p, new_edges) > 0)
1024 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
1026 gcc_assert (!edge->aux);
1027 if (edge->callee->local.inlinable
1028 && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1029 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1034 /* We use a greedy algorithm for inlining of small functions:
1035 all inline candidates are put into a prioritized heap based on the estimated
1036 growth of the overall number of instructions, and the estimates are then updated.
1038 INLINED and INLINED_CALLEES are just pointers to arrays large enough
1039 to be passed to cgraph_inlined_into and cgraph_inlined_callees. */
1041 static void
1042 cgraph_decide_inlining_of_small_functions (void)
1044 struct cgraph_node *node;
1045 struct cgraph_edge *edge;
1046 cgraph_inline_failed_t failed_reason;
1047 fibheap_t heap = fibheap_new ();
1048 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1049 int min_size, max_size;
1050 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
1052 if (flag_indirect_inlining)
1053 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
1055 if (dump_file)
1056 fprintf (dump_file, "\nDeciding on smaller functions:\n");
1058 /* Put all inline candidates into the heap. */
1060 for (node = cgraph_nodes; node; node = node->next)
1062 if (!node->local.inlinable || !node->callers)
1063 continue;
1064 if (dump_file)
1065 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
1067 node->global.estimated_growth = INT_MIN;
1068 if (!cgraph_default_inline_p (node, &failed_reason))
1070 cgraph_set_inline_failed (node, failed_reason);
1071 continue;
1074 for (edge = node->callers; edge; edge = edge->next_caller)
1075 if (edge->inline_failed)
1077 gcc_assert (!edge->aux);
1078 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1082 max_size = compute_max_insns (overall_size);
1083 min_size = overall_size;
1085 while (overall_size <= max_size
1086 && !fibheap_empty (heap))
1088 int old_size = overall_size;
1089 struct cgraph_node *where, *callee;
1090 int badness = fibheap_min_key (heap);
1091 int current_badness;
1092 int growth;
1093 cgraph_inline_failed_t not_good = CIF_OK;
1095 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1096 gcc_assert (edge->aux);
1097 edge->aux = NULL;
1098 if (!edge->inline_failed)
1099 continue;
1101 /* When updating the edge costs, we only decrease badness in the keys.
1102 When the badness increases, we keep the heap as it is and re-insert the
1103 key now. */
1104 current_badness = cgraph_edge_badness (edge, false);
1105 gcc_assert (current_badness >= badness);
1106 if (current_badness != badness)
1108 edge->aux = fibheap_insert (heap, current_badness, edge);
1109 continue;
1112 callee = edge->callee;
1114 growth = (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
1115 - edge->caller->global.size);
1117 if (dump_file)
1119 fprintf (dump_file,
1120 "\nConsidering %s with %i size\n",
1121 cgraph_node_name (edge->callee),
1122 edge->callee->global.size);
1123 fprintf (dump_file,
1124 " to be inlined into %s in %s:%i\n"
1125 " Estimated growth after inlined into all callees is %+i insns.\n"
1126 " Estimated badness is %i, frequency %.2f.\n",
1127 cgraph_node_name (edge->caller),
1128 flag_wpa ? "unknown"
1129 : gimple_filename ((const_gimple) edge->call_stmt),
1130 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1131 cgraph_estimate_growth (edge->callee),
1132 badness,
1133 edge->frequency / (double)CGRAPH_FREQ_BASE);
1134 if (edge->count)
1135 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1136 if (dump_flags & TDF_DETAILS)
1137 cgraph_edge_badness (edge, true);
1140 /* When we do not have profile info ready, we don't weight in any way the
1141 position of the call in the procedure itself. This means that if a call of
1142 function A from function B seems profitable to inline, the recursive
1143 call of function A in the inline copy of A in B will look profitable too,
1144 and we end up inlining until reaching the maximal function growth. This
1145 is not a good idea, so prohibit recursive inlining.
1147 ??? When the frequencies are taken into account we might not need this
1148 restriction.
1150 We need to be careful here; in some testcases, e.g. directives.c in
1151 libcpp, we can estimate a self-recursive function to have negative growth
1152 for inlining completely. */
1154 if (!edge->count)
1156 where = edge->caller;
1157 while (where->global.inlined_to)
1159 if (where->decl == edge->callee->decl)
1160 break;
1161 where = where->callers->caller;
1163 if (where->global.inlined_to)
1165 edge->inline_failed
1166 = (edge->callee->local.disregard_inline_limits
1167 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1168 if (dump_file)
1169 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
1170 continue;
1174 if (edge->callee->local.disregard_inline_limits)
1176 else if (!cgraph_maybe_hot_edge_p (edge))
1177 not_good = CIF_UNLIKELY_CALL;
1178 else if (!flag_inline_functions
1179 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
1180 not_good = CIF_NOT_DECLARED_INLINED;
1181 else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
1182 not_good = CIF_OPTIMIZING_FOR_SIZE;
1183 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
1185 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1186 &edge->inline_failed))
1188 edge->inline_failed = not_good;
1189 if (dump_file)
1190 fprintf (dump_file, " inline_failed:%s.\n",
1191 cgraph_inline_failed_string (edge->inline_failed));
1193 continue;
1195 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1197 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1198 &edge->inline_failed))
1200 if (dump_file)
1201 fprintf (dump_file, " inline_failed:%s.\n",
1202 cgraph_inline_failed_string (edge->inline_failed));
1204 continue;
1206 if (!tree_can_inline_p (edge))
1208 if (dump_file)
1209 fprintf (dump_file, " inline_failed:%s.\n",
1210 cgraph_inline_failed_string (edge->inline_failed));
1211 continue;
1213 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1214 &edge->inline_failed))
1216 where = edge->caller;
1217 if (where->global.inlined_to)
1218 where = where->global.inlined_to;
1219 if (!cgraph_decide_recursive_inlining (where,
1220 flag_indirect_inlining
1221 ? &new_indirect_edges : NULL))
1222 continue;
1223 if (flag_indirect_inlining)
1224 add_new_edges_to_heap (heap, new_indirect_edges);
1225 update_all_callee_keys (heap, where, updated_nodes);
1227 else
1229 struct cgraph_node *callee;
1230 if (edge->call_stmt_cannot_inline_p
1231 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1232 &edge->inline_failed, true))
1234 if (dump_file)
1235 fprintf (dump_file, " Not inlining into %s:%s.\n",
1236 cgraph_node_name (edge->caller),
1237 cgraph_inline_failed_string (edge->inline_failed));
1238 continue;
1240 callee = edge->callee;
1241 gcc_checking_assert (!callee->global.inlined_to);
1242 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1243 if (flag_indirect_inlining)
1244 add_new_edges_to_heap (heap, new_indirect_edges);
1246 /* We inlined the last offline copy into the body. This might lead
1247 to callees of the function having fewer call sites, and thus they
1248 may need updating. */
1249 if (callee->global.inlined_to)
1250 update_all_callee_keys (heap, callee, updated_nodes);
1251 else
1252 update_callee_keys (heap, edge->callee, updated_nodes);
1254 where = edge->caller;
1255 if (where->global.inlined_to)
1256 where = where->global.inlined_to;
1258 /* Our profitability metric can depend on local properties
1259 such as the number of inlinable calls and the size of the function body.
1260 After inlining these properties might change for the function we
1261 inlined into (since its body size changed) and for the functions
1262 called by the function we inlined (since the number of its inlinable callers
1263 might change). */
1264 update_caller_keys (heap, where, updated_nodes);
1266 /* We removed one call of the function we just inlined. If offline
1267 copy is still needed, be sure to update the keys. */
1268 if (callee != where && !callee->global.inlined_to)
1269 update_caller_keys (heap, callee, updated_nodes);
1270 bitmap_clear (updated_nodes);
1272 if (dump_file)
1274 fprintf (dump_file,
1275 " Inlined into %s which now has time %i and size %i,"
1276 " net change of %+i.\n",
1277 cgraph_node_name (edge->caller),
1278 edge->caller->global.time,
1279 edge->caller->global.size,
1280 overall_size - old_size);
1282 if (min_size > overall_size)
1284 min_size = overall_size;
1285 max_size = compute_max_insns (min_size);
1287 if (dump_file)
1288 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1291 while (!fibheap_empty (heap))
1293 int badness = fibheap_min_key (heap);
1295 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1296 gcc_assert (edge->aux);
1297 edge->aux = NULL;
1298 if (!edge->inline_failed)
1299 continue;
1300 #ifdef ENABLE_CHECKING
1301 gcc_assert (cgraph_edge_badness (edge, false) >= badness);
1302 #endif
1303 if (dump_file)
1305 fprintf (dump_file,
1306 "\nSkipping %s with %i size\n",
1307 cgraph_node_name (edge->callee),
1308 edge->callee->global.size);
1309 fprintf (dump_file,
1310 " called by %s in %s:%i\n"
1311 " Estimated growth after inlined into all callees is %+i insns.\n"
1312 " Estimated badness is %i, frequency %.2f.\n",
1313 cgraph_node_name (edge->caller),
1314 flag_wpa ? "unknown"
1315 : gimple_filename ((const_gimple) edge->call_stmt),
1316 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1317 cgraph_estimate_growth (edge->callee),
1318 badness,
1319 edge->frequency / (double)CGRAPH_FREQ_BASE);
1320 if (edge->count)
1321 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1322 if (dump_flags & TDF_DETAILS)
1323 cgraph_edge_badness (edge, true);
1325 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1326 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1327 &edge->inline_failed))
1328 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1331 if (new_indirect_edges)
1332 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1333 fibheap_delete (heap);
1334 BITMAP_FREE (updated_nodes);
1337 /* Flatten NODE from the IPA inliner. */
1339 static void
1340 cgraph_flatten (struct cgraph_node *node)
1342 struct cgraph_edge *e;
1344 /* We shouldn't be called recursively when we are being processed. */
1345 gcc_assert (node->aux == NULL);
1347 node->aux = (void *)(size_t) INLINE_ALL;
1349 for (e = node->callees; e; e = e->next_callee)
1351 struct cgraph_node *orig_callee;
1353 if (e->call_stmt_cannot_inline_p)
1354 continue;
1356 if (!e->callee->analyzed)
1358 if (dump_file)
1359 fprintf (dump_file,
1360 "Not inlining: Function body not available.\n");
1361 continue;
1364 /* We've hit cycle? It is time to give up. */
1365 if (e->callee->aux)
1367 if (dump_file)
1368 fprintf (dump_file,
1369 "Not inlining %s into %s to avoid cycle.\n",
1370 cgraph_node_name (e->callee),
1371 cgraph_node_name (e->caller));
1372 e->inline_failed = CIF_RECURSIVE_INLINING;
1373 continue;
1376 /* When the edge is already inlined, we just need to recurse into
1377 it in order to fully flatten the leaves. */
1378 if (!e->inline_failed)
1380 cgraph_flatten (e->callee);
1381 continue;
1384 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1386 if (dump_file)
1387 fprintf (dump_file, "Not inlining: recursive call.\n");
1388 continue;
1391 if (!tree_can_inline_p (e))
1393 if (dump_file)
1394 fprintf (dump_file, "Not inlining: %s",
1395 cgraph_inline_failed_string (e->inline_failed));
1396 continue;
1399 /* Inline the edge and flatten the inline clone. Avoid
1400 recursing through the original node if the node was cloned. */
1401 if (dump_file)
1402 fprintf (dump_file, " Inlining %s into %s.\n",
1403 cgraph_node_name (e->callee),
1404 cgraph_node_name (e->caller));
1405 orig_callee = e->callee;
1406 cgraph_mark_inline_edge (e, true, NULL);
1407 if (e->callee != orig_callee)
1408 orig_callee->aux = (void *)(size_t) INLINE_ALL;
1409 cgraph_flatten (e->callee);
1410 if (e->callee != orig_callee)
1411 orig_callee->aux = NULL;
1414 node->aux = NULL;
1417 /* Decide on the inlining. We do so in topological order to avoid
1418 the expense of updating data structures. */
1420 static unsigned int
1421 cgraph_decide_inlining (void)
1423 struct cgraph_node *node;
1424 int nnodes;
1425 struct cgraph_node **order =
1426 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1427 int old_size = 0;
1428 int i;
1429 int initial_size = 0;
1431 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1432 if (in_lto_p && flag_indirect_inlining)
1433 ipa_update_after_lto_read ();
1434 if (flag_indirect_inlining)
1435 ipa_create_all_structures_for_iinln ();
1437 max_count = 0;
1438 max_benefit = 0;
1439 for (node = cgraph_nodes; node; node = node->next)
1440 if (node->analyzed)
1442 struct cgraph_edge *e;
1444 gcc_assert (inline_summary (node)->self_size == node->global.size);
1445 initial_size += node->global.size;
1446 for (e = node->callees; e; e = e->next_callee)
1447 if (max_count < e->count)
1448 max_count = e->count;
1449 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1450 max_benefit = inline_summary (node)->time_inlining_benefit;
1452 gcc_assert (in_lto_p
1453 || !max_count
1454 || (profile_info && flag_branch_probabilities));
1455 overall_size = initial_size;
1457 nnodes = cgraph_postorder (order);
1459 if (dump_file)
1460 fprintf (dump_file,
1461 "\nDeciding on inlining. Starting with size %i.\n",
1462 initial_size);
1464 for (node = cgraph_nodes; node; node = node->next)
1465 node->aux = 0;
1467 if (dump_file)
1468 fprintf (dump_file, "\nFlattening functions:\n");
1470 /* In the first pass handle functions to be flattened. Do this with
1471 a priority so none of our later choices will make this impossible. */
1472 for (i = nnodes - 1; i >= 0; i--)
1474 node = order[i];
1476 /* Handle nodes to be flattened, but don't update overall unit
1477 size. Calling the incremental inliner here is lame,
1478 a simple worklist should be enough. What should be left
1479 here from the early inliner (if it runs) is cyclic cases.
1480 Ideally when processing callees we stop inlining at the
1481 entry of cycles, possibly cloning that entry point and
1482 trying to flatten it, turning it into a self-recursive
1483 function. */
1484 if (lookup_attribute ("flatten",
1485 DECL_ATTRIBUTES (node->decl)) != NULL)
1487 if (dump_file)
1488 fprintf (dump_file,
1489 "Flattening %s\n", cgraph_node_name (node));
1490 cgraph_flatten (node);
1494 cgraph_decide_inlining_of_small_functions ();
1496 if (flag_inline_functions_called_once)
1498 if (dump_file)
1499 fprintf (dump_file, "\nDeciding on functions called once:\n");
1501 /* And finally decide what functions are called once. */
1502 for (i = nnodes - 1; i >= 0; i--)
1504 node = order[i];
1506 if (node->callers
1507 && !node->callers->next_caller
1508 && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
1509 && node->local.inlinable
1510 && cgraph_function_body_availability (node) >= AVAIL_AVAILABLE
1511 && node->callers->inline_failed
1512 && node->callers->caller != node
1513 && node->callers->caller->global.inlined_to != node
1514 && !node->callers->call_stmt_cannot_inline_p
1515 && !DECL_EXTERNAL (node->decl))
1517 cgraph_inline_failed_t reason;
1518 old_size = overall_size;
1519 if (dump_file)
1521 fprintf (dump_file,
1522 "\nConsidering %s size %i.\n",
1523 cgraph_node_name (node), node->global.size);
1524 fprintf (dump_file,
1525 " Called once from %s %i insns.\n",
1526 cgraph_node_name (node->callers->caller),
1527 node->callers->caller->global.size);
1530 if (cgraph_check_inline_limits (node->callers->caller, node,
1531 &reason, false))
1533 struct cgraph_node *caller = node->callers->caller;
1534 cgraph_mark_inline (node->callers);
1535 if (dump_file)
1536 fprintf (dump_file,
1537 " Inlined into %s which now has %i size"
1538 " for a net change of %+i size.\n",
1539 cgraph_node_name (caller),
1540 caller->global.size,
1541 overall_size - old_size);
1543 else
1545 if (dump_file)
1546 fprintf (dump_file,
1547 " Not inlining: %s.\n",
1548 cgraph_inline_failed_string (reason));
1554 /* Free ipa-prop structures if they are no longer needed. */
1555 if (flag_indirect_inlining)
1556 ipa_free_all_structures_after_iinln ();
1558 if (dump_file)
1559 fprintf (dump_file,
1560 "\nInlined %i calls, eliminated %i functions, "
1561 "size %i turned to %i size.\n\n",
1562 ncalls_inlined, nfunctions_inlined, initial_size,
1563 overall_size);
1564 free (order);
1565 return 0;
1568 /* Return true when N is a leaf function. Accept cheap (pure & const) builtins
1569 in leaf functions. */
1570 static bool
1571 leaf_node_p (struct cgraph_node *n)
1573 struct cgraph_edge *e;
1574 for (e = n->callees; e; e = e->next_callee)
1575 if (!DECL_BUILT_IN (e->callee->decl)
1576 || (!TREE_READONLY (e->callee->decl)
1577 && !DECL_PURE_P (e->callee->decl)))
1578 return false;
1579 return true;
1582 /* Decide on the inlining of the callees of NODE incrementally, according to
1583 MODE. Used by the early inliner. */
1585 static bool
1586 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1587 enum inlining_mode mode)
1589 struct cgraph_edge *e;
1590 bool inlined = false;
1591 cgraph_inline_failed_t failed_reason;
1593 #ifdef ENABLE_CHECKING
1594 verify_cgraph_node (node);
1595 #endif
1597 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1598 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1600 if (dump_file)
1601 fprintf (dump_file, "Incrementally flattening %s\n",
1602 cgraph_node_name (node));
1603 mode = INLINE_ALL;
1606 /* First of all look for always inline functions. */
1607 if (mode != INLINE_SIZE_NORECURSIVE)
1608 for (e = node->callees; e; e = e->next_callee)
1610 if (!e->callee->local.disregard_inline_limits
1611 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1612 continue;
1613 if (e->call_stmt_cannot_inline_p)
1614 continue;
1615 if (dump_file)
1616 fprintf (dump_file,
1617 "Considering to always inline inline candidate %s.\n",
1618 cgraph_node_name (e->callee));
1619 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1621 if (dump_file)
1622 fprintf (dump_file, "Not inlining: recursive call.\n");
1623 continue;
1625 if (!tree_can_inline_p (e))
1627 if (dump_file)
1628 fprintf (dump_file,
1629 "Not inlining: %s",
1630 cgraph_inline_failed_string (e->inline_failed));
1631 continue;
1633 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1634 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1636 if (dump_file)
1637 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1638 continue;
1640 if (!e->callee->analyzed)
1642 if (dump_file)
1643 fprintf (dump_file,
1644 "Not inlining: Function body no longer available.\n");
1645 continue;
1648 if (dump_file)
1649 fprintf (dump_file, " Inlining %s into %s.\n",
1650 cgraph_node_name (e->callee),
1651 cgraph_node_name (e->caller));
1652 cgraph_mark_inline (e);
1653 inlined = true;
1656 /* Now do the automatic inlining. */
1657 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
1658 /* Never inline regular functions into always-inline functions
1659 during incremental inlining. */
1660 && !node->local.disregard_inline_limits)
1662 bitmap visited = BITMAP_ALLOC (NULL);
1663 for (e = node->callees; e; e = e->next_callee)
1665 int allowed_growth = 0;
1666 if (!e->callee->local.inlinable
1667 || !e->inline_failed
1668 || e->callee->local.disregard_inline_limits)
1669 continue;
1670 /* We are inlining a function to all call-sites in node
1671 or to none. So visit each candidate only once. */
1672 if (!bitmap_set_bit (visited, e->callee->uid))
1673 continue;
1674 if (dump_file)
1675 fprintf (dump_file, "Considering inline candidate %s.\n",
1676 cgraph_node_name (e->callee));
1677 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1679 if (dump_file)
1680 fprintf (dump_file, "Not inlining: recursive call.\n");
1681 continue;
1683 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1684 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1686 if (dump_file)
1687 fprintf (dump_file,
1688 "Not inlining: SSA form does not match.\n");
1689 continue;
1692 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1693 && optimize_function_for_speed_p (cfun))
1694 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1696 /* When the function body would grow and inlining the function
1697 won't eliminate the need for offline copy of the function,
1698 don't inline. */
1699 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1700 || (!flag_inline_functions
1701 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1702 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1703 > e->caller->global.size + allowed_growth)
1704 && cgraph_estimate_growth (e->callee) > allowed_growth)
1706 if (dump_file)
1707 fprintf (dump_file,
1708 "Not inlining: code size would grow by %i.\n",
1709 cgraph_estimate_size_after_inlining (1, e->caller,
1710 e->callee)
1711 - e->caller->global.size);
1712 continue;
1714 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1715 false)
1716 || e->call_stmt_cannot_inline_p)
1718 if (dump_file)
1719 fprintf (dump_file, "Not inlining: %s.\n",
1720 cgraph_inline_failed_string (e->inline_failed));
1721 continue;
1723 if (!e->callee->analyzed)
1725 if (dump_file)
1726 fprintf (dump_file,
1727 "Not inlining: Function body no longer available.\n");
1728 continue;
1730 if (!tree_can_inline_p (e))
1732 if (dump_file)
1733 fprintf (dump_file,
1734 "Not inlining: %s.",
1735 cgraph_inline_failed_string (e->inline_failed));
1736 continue;
1738 if (cgraph_default_inline_p (e->callee, &failed_reason))
1740 if (dump_file)
1741 fprintf (dump_file, " Inlining %s into %s.\n",
1742 cgraph_node_name (e->callee),
1743 cgraph_node_name (e->caller));
1744 cgraph_mark_inline (e);
1745 inlined = true;
1748 BITMAP_FREE (visited);
1750 return inlined;
1753 /* Because inlining might remove no-longer-reachable nodes, we need to
1754 keep the array visible to the garbage collector to avoid reading
1755 collected-out nodes. */
1756 static int nnodes;
1757 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1759 /* Do inlining of small functions. Doing so early helps profiling and other
1760 passes to be somewhat more effective and avoids some code duplication in
1761 later real inlining pass for testcases with very many function calls. */
1762 static unsigned int
1763 cgraph_early_inlining (void)
1765 struct cgraph_node *node = cgraph_node (current_function_decl);
1766 unsigned int todo = 0;
1767 int iterations = 0;
1769 if (seen_error ())
1770 return 0;
1772 if (!optimize
1773 || flag_no_inline
1774 || !flag_early_inlining)
1776 /* When not optimizing or not inlining, inline only always-inline
1777 functions. */
1778 cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
1779 timevar_push (TV_INTEGRATION);
1780 todo |= optimize_inline_calls (current_function_decl);
1781 timevar_pop (TV_INTEGRATION);
1783 else
1785 if (lookup_attribute ("flatten",
1786 DECL_ATTRIBUTES (node->decl)) != NULL)
1788 if (dump_file)
1789 fprintf (dump_file,
1790 "Flattening %s\n", cgraph_node_name (node));
1791 cgraph_flatten (node);
1792 timevar_push (TV_INTEGRATION);
1793 todo |= optimize_inline_calls (current_function_decl);
1794 timevar_pop (TV_INTEGRATION);
1796 /* We iterate incremental inlining to get trivial cases of indirect
1797 inlining. */
1798 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1799 && cgraph_decide_inlining_incrementally (node,
1800 iterations
1801 ? INLINE_SIZE_NORECURSIVE
1802 : INLINE_SIZE))
1804 timevar_push (TV_INTEGRATION);
1805 todo |= optimize_inline_calls (current_function_decl);
1806 iterations++;
1807 timevar_pop (TV_INTEGRATION);
1809 if (dump_file)
1810 fprintf (dump_file, "Iterations: %i\n", iterations);
1813 cfun->always_inline_functions_inlined = true;
1815 return todo;
1818 struct gimple_opt_pass pass_early_inline =
1821 GIMPLE_PASS,
1822 "einline", /* name */
1823 NULL, /* gate */
1824 cgraph_early_inlining, /* execute */
1825 NULL, /* sub */
1826 NULL, /* next */
1827 0, /* static_pass_number */
1828 TV_INLINE_HEURISTICS, /* tv_id */
1829 0, /* properties_required */
1830 0, /* properties_provided */
1831 0, /* properties_destroyed */
1832 0, /* todo_flags_start */
1833 TODO_dump_func /* todo_flags_finish */
1837 /* When inlining shall be performed. */
1838 static bool
1839 cgraph_gate_ipa_early_inlining (void)
1841 return (flag_early_inlining
1842 && !in_lto_p
1843 && (flag_branch_probabilities || flag_test_coverage
1844 || profile_arc_flag));
1847 /* IPA pass wrapper for the early inlining pass. We need to run early inlining
1848 before tree profiling, so we have a stand-alone IPA pass for doing so. */
1849 struct simple_ipa_opt_pass pass_ipa_early_inline =
1852 SIMPLE_IPA_PASS,
1853 "einline_ipa", /* name */
1854 cgraph_gate_ipa_early_inlining, /* gate */
1855 NULL, /* execute */
1856 NULL, /* sub */
1857 NULL, /* next */
1858 0, /* static_pass_number */
1859 TV_INLINE_HEURISTICS, /* tv_id */
1860 0, /* properties_required */
1861 0, /* properties_provided */
1862 0, /* properties_destroyed */
1863 0, /* todo_flags_start */
1864 TODO_dump_cgraph /* todo_flags_finish */
1868 /* See if the statement might disappear after inlining. We are not terribly
1869 sophisticated, basically looking for simple abstraction penalty wrappers. */
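/* For example, in an illustrative accessor such as

     static int get_x (const struct point *p) { return p->x; }

   both the load through the parameter P and the return statement are likely
   to be optimized away once the call is inlined, so such statements are
   counted as inlining benefit rather than as real size and time.  */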
1871 static bool
1872 likely_eliminated_by_inlining_p (gimple stmt)
1874 enum gimple_code code = gimple_code (stmt);
1875 switch (code)
1877 case GIMPLE_RETURN:
1878 return true;
1879 case GIMPLE_ASSIGN:
1880 if (gimple_num_ops (stmt) != 2)
1881 return false;
1883 /* Casts of parameters, loads from parameters passed by reference
1884 and stores to return value or parameters are probably free after
1885 inlining. */
1886 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1887 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1888 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1889 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1891 tree rhs = gimple_assign_rhs1 (stmt);
1892 tree lhs = gimple_assign_lhs (stmt);
1893 tree inner_rhs = rhs;
1894 tree inner_lhs = lhs;
1895 bool rhs_free = false;
1896 bool lhs_free = false;
1898 while (handled_component_p (inner_lhs)
1899 || TREE_CODE (inner_lhs) == MEM_REF)
1900 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1901 while (handled_component_p (inner_rhs)
1902 || TREE_CODE (inner_rhs) == ADDR_EXPR
1903 || TREE_CODE (inner_rhs) == MEM_REF)
1904 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1907 if (TREE_CODE (inner_rhs) == PARM_DECL
1908 || (TREE_CODE (inner_rhs) == SSA_NAME
1909 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1910 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1911 rhs_free = true;
1912 if (rhs_free && is_gimple_reg (lhs))
1913 lhs_free = true;
1914 if (((TREE_CODE (inner_lhs) == PARM_DECL
1915 || (TREE_CODE (inner_lhs) == SSA_NAME
1916 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1917 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1918 && inner_lhs != lhs)
1919 || TREE_CODE (inner_lhs) == RESULT_DECL
1920 || (TREE_CODE (inner_lhs) == SSA_NAME
1921 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1922 lhs_free = true;
1923 if (lhs_free
1924 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1925 rhs_free = true;
1926 if (lhs_free && rhs_free)
1927 return true;
1929 return false;
1930 default:
1931 return false;
1935 /* Compute function body size parameters for NODE. */
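/* Size is the sum of the per-statement estimates; time additionally weights
   each statement by its basic block frequency and is scaled back down by
   CGRAPH_FREQ_BASE at the end.  As an illustration, a statement with
   estimated time 2 in a block of frequency 500 (with CGRAPH_FREQ_BASE being
   1000) contributes 2 * 500 = 1000 to the accumulated TIME, i.e. one unit
   after the final scaling.  */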
1937 static void
1938 estimate_function_body_sizes (struct cgraph_node *node)
1940 gcov_type time = 0;
1941 gcov_type time_inlining_benefit = 0;
1942 int size = 0;
1943 int size_inlining_benefit = 0;
1944 basic_block bb;
1945 gimple_stmt_iterator bsi;
1946 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1947 tree arg;
1948 int freq;
1949 tree funtype = TREE_TYPE (node->decl);
1951 if (dump_file)
1952 fprintf (dump_file, "Analyzing function body size: %s\n",
1953 cgraph_node_name (node));
1955 gcc_assert (my_function && my_function->cfg);
1956 FOR_EACH_BB_FN (bb, my_function)
1958 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1959 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1961 gimple stmt = gsi_stmt (bsi);
1962 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1963 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1965 if (dump_file && (dump_flags & TDF_DETAILS))
1967 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1968 freq, this_size, this_time);
1969 print_gimple_stmt (dump_file, stmt, 0, 0);
1971 this_time *= freq;
1972 time += this_time;
1973 size += this_size;
1974 if (likely_eliminated_by_inlining_p (stmt))
1976 size_inlining_benefit += this_size;
1977 time_inlining_benefit += this_time;
1978 if (dump_file && (dump_flags & TDF_DETAILS))
1979 fprintf (dump_file, " Likely eliminated\n");
1981 gcc_assert (time >= 0);
1982 gcc_assert (size >= 0);
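  /* Scale the frequency-weighted totals back to plain time units,
     rounding to the nearest whole unit.  */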
1985 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1986 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1987 / CGRAPH_FREQ_BASE);
1988 if (dump_file)
1989 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1990 (int)time, (int)time_inlining_benefit,
1991 size, size_inlining_benefit);
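  /* Account for the overhead of the call itself, which also disappears
     when the call is inlined: the call cost plus the cost of moving the
     return value and each non-void argument.  */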
1992 time_inlining_benefit += eni_time_weights.call_cost;
1993 size_inlining_benefit += eni_size_weights.call_cost;
1994 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1996 int cost = estimate_move_cost (TREE_TYPE (funtype));
1997 time_inlining_benefit += cost;
1998 size_inlining_benefit += cost;
2000 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = DECL_CHAIN (arg))
2001 if (!VOID_TYPE_P (TREE_TYPE (arg)))
2003 int cost = estimate_move_cost (TREE_TYPE (arg));
2004 time_inlining_benefit += cost;
2005 size_inlining_benefit += cost;
2007 if (time_inlining_benefit > MAX_TIME)
2008 time_inlining_benefit = MAX_TIME;
2009 if (time > MAX_TIME)
2010 time = MAX_TIME;
2011 inline_summary (node)->self_time = time;
2012 inline_summary (node)->self_size = size;
2013 if (dump_file)
2014 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
2015 (int)time, (int)time_inlining_benefit,
2016 size, size_inlining_benefit);
2017 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
2018 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
2021 /* Compute parameters of functions used by inliner. */
2022 unsigned int
2023 compute_inline_parameters (struct cgraph_node *node)
2025 HOST_WIDE_INT self_stack_size;
2027 gcc_assert (!node->global.inlined_to);
2029 /* Estimate the stack size for the function, but not at -O0,
2030 because estimated_stack_frame_size is a quadratic problem. */
2031 self_stack_size = optimize ? estimated_stack_frame_size (node->decl) : 0;
2032 inline_summary (node)->estimated_self_stack_size = self_stack_size;
2033 node->global.estimated_stack_size = self_stack_size;
2034 node->global.stack_frame_offset = 0;
2036 /* Can this function be inlined at all? */
2037 node->local.inlinable = tree_inlinable_function_p (node->decl);
2038 if (node->local.inlinable && !node->local.disregard_inline_limits)
2039 node->local.disregard_inline_limits
2040 = DECL_DISREGARD_INLINE_LIMITS (node->decl);
2041 estimate_function_body_sizes (node);
2042 /* Inlining characteristics are maintained by cgraph_mark_inline. */
2043 node->global.time = inline_summary (node)->self_time;
2044 node->global.size = inline_summary (node)->self_size;
2045 return 0;
2049 /* Compute parameters of functions used by inliner using
2050 current_function_decl. */
2051 static unsigned int
2052 compute_inline_parameters_for_current (void)
2054 compute_inline_parameters (cgraph_node (current_function_decl));
2055 return 0;
2058 struct gimple_opt_pass pass_inline_parameters =
2061 GIMPLE_PASS,
2062 "inline_param", /* name */
2063 NULL, /* gate */
2064 compute_inline_parameters_for_current,/* execute */
2065 NULL, /* sub */
2066 NULL, /* next */
2067 0, /* static_pass_number */
2068 TV_INLINE_HEURISTICS, /* tv_id */
2069 0, /* properties_required */
2070 0, /* properties_provided */
2071 0, /* properties_destroyed */
2072 0, /* todo_flags_start */
2073 0 /* todo_flags_finish */
2077 /* This function performs the intraprocedural analysis of NODE that is required
2078 to inline indirect calls. */
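/* For an illustrative call site such as

     foo (bar, x + 1);

   where X is a parameter of the caller, the analysis records jump functions
   noting that the first actual argument is the known function BAR and the
   second is a simple arithmetic function of X; the inliner later uses this
   information to discover indirect calls that become direct after inlining.  */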
2079 static void
2080 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
2082 ipa_analyze_node (node);
2083 if (dump_file && (dump_flags & TDF_DETAILS))
2085 ipa_print_node_params (dump_file, node);
2086 ipa_print_node_jump_functions (dump_file, node);
2090 /* Note function body size. */
2091 static void
2092 analyze_function (struct cgraph_node *node)
2094 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2095 current_function_decl = node->decl;
2097 compute_inline_parameters (node);
2098 /* FIXME: We should remove the optimize check after we ensure we never run
2099 IPA passes when not optimizing. */
2100 if (flag_indirect_inlining && optimize)
2101 inline_indirect_intraprocedural_analysis (node);
2103 current_function_decl = NULL;
2104 pop_cfun ();
2107 /* Called when a new function is inserted into the callgraph late. */
2108 static void
2109 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
2111 analyze_function (node);
2114 /* Compute inline parameters for all analyzed functions in the callgraph. */
2115 static void
2116 inline_generate_summary (void)
2118 struct cgraph_node *node;
2120 function_insertion_hook_holder =
2121 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2123 if (flag_indirect_inlining)
2125 ipa_register_cgraph_hooks ();
2126 ipa_check_create_node_params ();
2127 ipa_check_create_edge_args ();
2130 for (node = cgraph_nodes; node; node = node->next)
2131 if (node->analyzed)
2132 analyze_function (node);
2134 return;
2137 /* Apply inline plan to function. */
2138 static unsigned int
2139 inline_transform (struct cgraph_node *node)
2141 unsigned int todo = 0;
2142 struct cgraph_edge *e;
2143 bool inline_p = false;
2145 /* FIXME: Currently the pass manager is adding the inline transform more than
2146 once to some clones. This needs revisiting after WPA cleanups. */
2147 if (cfun->after_inlining)
2148 return 0;
2150 /* We might need the body of this function so that we can expand
2151 it inline somewhere else. */
2152 if (cgraph_preserve_function_body_p (node->decl))
2153 save_inline_function_body (node);
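  /* Redirect each call statement to its actual callee (which may be a
     clone) and note whether any call was actually inlined or -Winline
     diagnostics were requested, in which case optimize_inline_calls must
     run to expand the bodies and emit the warnings.  */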
2155 for (e = node->callees; e; e = e->next_callee)
2157 cgraph_redirect_edge_call_stmt_to_callee (e);
2158 if (!e->inline_failed || warn_inline)
2159 inline_p = true;
2162 if (inline_p)
2164 timevar_push (TV_INTEGRATION);
2165 todo = optimize_inline_calls (current_function_decl);
2166 timevar_pop (TV_INTEGRATION);
2168 cfun->always_inline_functions_inlined = true;
2169 cfun->after_inlining = true;
2170 return todo | execute_fixup_cfg ();
2173 /* Read the inline summary. Jump functions are shared among ipa-cp
2174 and the inliner, so when ipa-cp is active, we don't need to read them
2175 twice. */
2177 static void
2178 inline_read_summary (void)
2180 if (flag_indirect_inlining)
2182 ipa_register_cgraph_hooks ();
2183 if (!flag_ipa_cp)
2184 ipa_prop_read_jump_functions ();
2186 function_insertion_hook_holder =
2187 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2190 /* Write inline summaries for the nodes in SET.
2191 Jump functions are shared among ipa-cp and the inliner, so when ipa-cp is
2192 active, we don't need to write them twice. */
2194 static void
2195 inline_write_summary (cgraph_node_set set,
2196 varpool_node_set vset ATTRIBUTE_UNUSED)
2198 if (flag_indirect_inlining && !flag_ipa_cp)
2199 ipa_prop_write_jump_functions (set);
2202 /* When to run IPA inlining. Inlining of always-inline functions
2203 happens during early inlining. */
2205 static bool
2206 gate_cgraph_decide_inlining (void)
2208 /* ??? We'd like to skip this if not optimizing or not inlining as
2209 all always-inline functions have been processed by early
2210 inlining already. But this at least breaks EH with C++ as
2211 we need to unconditionally run fixup_cfg even at -O0.
2212 So leave it on unconditionally for now. */
2213 return 1;
2216 struct ipa_opt_pass_d pass_ipa_inline =
2219 IPA_PASS,
2220 "inline", /* name */
2221 gate_cgraph_decide_inlining, /* gate */
2222 cgraph_decide_inlining, /* execute */
2223 NULL, /* sub */
2224 NULL, /* next */
2225 0, /* static_pass_number */
2226 TV_INLINE_HEURISTICS, /* tv_id */
2227 0, /* properties_required */
2228 0, /* properties_provided */
2229 0, /* properties_destroyed */
2230 TODO_remove_functions, /* todo_flags_start */
2231 TODO_dump_cgraph | TODO_dump_func
2232 | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
2234 inline_generate_summary, /* generate_summary */
2235 inline_write_summary, /* write_summary */
2236 inline_read_summary, /* read_summary */
2237 NULL, /* write_optimization_summary */
2238 NULL, /* read_optimization_summary */
2239 NULL, /* stmt_fixup */
2240 0, /* TODOs */
2241 inline_transform, /* function_transform */
2242 NULL, /* variable_transform */
2246 #include "gt-ipa-inline.h"