gcc/ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Inlining decision heuristics
24    We separate inlining decisions from the inliner itself and store them
25    inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
26    documentation for the particular representation of inline plans in the
27    callgraph.
29 There are three major parts of this file:
31 cgraph_mark_inline implementation
33      This function allows marking a given call as inline and performs the
34      necessary modifications of the cgraph (production of the clones and
35      updating of overall statistics).
37 inlining heuristics limits
39      These functions allow checking that a particular inlining is allowed
40      by the limits specified by the user (allowed function growth, overall
41      unit growth and so on).
43 inlining heuristics
45      This is the implementation of the IPA pass aiming to get as much
46      benefit from inlining as possible while obeying the limits checked above.
48      The implementation of the particular heuristics is separated from
49      the rest of the code to make it easier to replace it with a more
50      complicated implementation in the future.  The rest of the inlining code
51      acts as a library aimed at modifying the callgraph and verifying that
52      the limits on code size growth are respected.
54      To mark a given call inline, use the cgraph_mark_inline function; the
55      verification is performed by cgraph_default_inline_p and
56      cgraph_check_inline_limits (an illustrative sketch follows this comment block).
58      The heuristics implement a simple knapsack-style algorithm ordering
59      all functions by their "profitability" (estimated by code size growth)
60      and inlining them in priority order.
62 cgraph_decide_inlining implements heuristics taking whole callgraph
63 into account, while cgraph_decide_inlining_incrementally considers
64 only one function at a time and is used by early inliner.
66 The inliner itself is split into several passes:
68 pass_inline_parameters
70      This pass computes the local properties of functions that are used by the
71      inliner: estimated function body size, whether the function is inlinable
72      at all, and stack frame consumption.
74      Before executing any of the inliner passes, this local pass has to be
75      applied to each function in the callgraph (i.e. run as a subpass of some
76      earlier IPA pass).  The results are made out of date by any optimization
77      applied to the function body.
79 pass_early_inlining
81      Simple local inlining pass inlining callees into the current function.
82      This pass does no global whole-compilation-unit analysis, and thus, when
83      allowed to do inlining that expands code size, it might result in
84      unbounded growth of the whole unit.
86      The pass is run during conversion into SSA form.  Only functions already
87      converted into SSA form are inlined, so the conversion must happen in
88      topological order on the callgraph (which is maintained by the pass
89      manager).  The functions are early optimized after inlining, so the early
90      inliner sees the unoptimized function itself, but all considered callees
91      are already optimized, allowing it to unfold the C++ abstraction penalty
92      effectively and cheaply.
94 pass_ipa_early_inlining
96      With profiling, early inlining is also necessary to reduce
97      instrumentation costs on programs with a high abstraction penalty (doing
98 many redundant calls). This can't happen in parallel with early
99 optimization and profile instrumentation, because we would end up
100 re-instrumenting already instrumented function bodies we brought in via
101 inlining.
103      To avoid this, this pass is executed as an IPA pass before profiling.  It
104      is a simple wrapper around pass_early_inlining and ensures that inlining happens first.
106 pass_ipa_inline
108      This is the main pass, implementing a simple greedy algorithm for the
109      inlining of small functions (which results in overall growth of the
110      compilation unit) and the inlining of functions called once.  The pass
111      computes just the so-called inline plan (the representation of the inlining
112      to be done in the callgraph) and, unlike early inlining, it does not perform the inlining itself.
114 pass_apply_inline
116      This pass performs the actual inlining according to pass_ipa_inline on a
117      given function.  Possibly the function body before inlining is saved when
118      it is needed for further inlining later.
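
/* An illustrative sketch (not part of the pass itself; the helper name
   consider_edge is hypothetical) of how the entry points described above
   fit together for a single callgraph edge E:

     static void
     consider_edge (struct cgraph_edge *e)
     {
       cgraph_inline_failed_t reason;

       if (!cgraph_default_inline_p (e->callee, &reason)
	   || cgraph_recursive_inlining_p (e->caller, e->callee, &reason)
	   || !cgraph_check_inline_limits (e->caller, e->callee, &reason, true))
	 e->inline_failed = reason;
       else
	 cgraph_mark_inline (e);
     }

   cgraph_mark_inline only records the decision in the inline plan; the
   actual body duplication is performed later when the plan is applied.  */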
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "tm.h"
125 #include "tree.h"
126 #include "tree-inline.h"
127 #include "langhooks.h"
128 #include "flags.h"
129 #include "cgraph.h"
130 #include "diagnostic.h"
131 #include "gimple-pretty-print.h"
132 #include "timevar.h"
133 #include "params.h"
134 #include "fibheap.h"
135 #include "intl.h"
136 #include "tree-pass.h"
137 #include "hashtab.h"
138 #include "coverage.h"
139 #include "ggc.h"
140 #include "tree-flow.h"
141 #include "rtl.h"
142 #include "ipa-prop.h"
143 #include "except.h"
145 #define MAX_TIME 1000000000
147 /* Modes the incremental inliner operates in:
149    In ALWAYS_INLINE mode only functions marked
150    always_inline are inlined.  This mode is used after detecting a cycle during
151    flattening.
153    In SIZE mode, only functions that reduce function body size after inlining
154    are inlined; this is used during early inlining.
156    In ALL mode, everything is inlined.  This is used during flattening.  */
157 enum inlining_mode {
158 INLINE_NONE = 0,
159 INLINE_ALWAYS_INLINE,
160 INLINE_SIZE_NORECURSIVE,
161 INLINE_SIZE,
162 INLINE_ALL
165 static bool
166 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode);
167 static void cgraph_flatten (struct cgraph_node *node);
170 /* Statistics we collect about inlining algorithm. */
171 static int ncalls_inlined;
172 static int nfunctions_inlined;
173 static int overall_size;
174 static gcov_type max_count, max_benefit;
176 /* Holders of ipa cgraph hooks: */
177 static struct cgraph_node_hook_list *function_insertion_hook_holder;
179 static inline struct inline_summary *
180 inline_summary (struct cgraph_node *node)
182 return &node->local.inline_summary;
185 /* Estimate self time of the function after inlining WHAT into TO. */
187 static int
188 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
189 struct cgraph_node *what)
191 gcov_type time = (((gcov_type)what->global.time
192 - inline_summary (what)->time_inlining_benefit)
193 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
194 + to->global.time;
195 if (time < 0)
196 time = 0;
197 if (time > MAX_TIME)
198 time = MAX_TIME;
199 return time;
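
/* For example (illustrative numbers only): if WHAT has time 500 and a
   time inlining benefit of 100, and the call has a frequency of
   CGRAPH_FREQ_BASE / 2, the callee contributes (500 - 100) / 2 = 200
   time units on top of TO's own time; the result is then clamped to the
   range [0, MAX_TIME].  */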
202 /* Estimate self size of the function after inlining WHAT into TO. */
204 static inline int
205 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
206 struct cgraph_node *what)
208 int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
209 * times + to->global.size);
210 gcc_assert (size >= 0);
211 return size;
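
/* For example (illustrative numbers only): inlining a callee of size 30
   with a size inlining benefit of 10 into a caller of size 50 at
   TIMES == 2 call sites gives (30 - 10) * 2 + 50 = 90.  */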
214 /* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
215 by NEST. */
217 static void
218 update_noncloned_frequencies (struct cgraph_node *node,
219 int freq_scale, int nest)
221 struct cgraph_edge *e;
223 /* We do not want to ignore high loop nest after freq drops to 0. */
224 if (!freq_scale)
225 freq_scale = 1;
226 for (e = node->callees; e; e = e->next_callee)
228 e->loop_nest += nest;
229 e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
230 if (e->frequency > CGRAPH_FREQ_MAX)
231 e->frequency = CGRAPH_FREQ_MAX;
232 if (!e->inline_failed)
233 update_noncloned_frequencies (e->callee, freq_scale, nest);
237 /* E is expected to be an edge being inlined.  Clone the destination node of
238    the edge and redirect the edge to the new clone.
239    DUPLICATE is used for bookkeeping on whether we are actually creating new
240    clones or re-using the node originally representing the out-of-line function call.
242 void
243 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
244 bool update_original)
246 HOST_WIDE_INT peak;
248 if (duplicate)
250 /* We may eliminate the need for out-of-line copy to be output.
251 In that case just go ahead and re-use it. */
252 if (!e->callee->callers->next_caller
253 /* FIXME: When address is taken of DECL_EXTERNAL function we still can remove its
254 offline copy, but we would need to keep unanalyzed node in the callgraph so
255 references can point to it. */
256 && !e->callee->address_taken
257 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
258 /* Inlining might enable more devirtualization, so we want to remove
259    those only after all devirtualizable virtual calls are processed.
260    Lacking "may" edges in the callgraph, we just preserve them post
261    inlining. */
262 && (!DECL_VIRTUAL_P (e->callee->decl)
263 || (!DECL_COMDAT (e->callee->decl) && !DECL_EXTERNAL (e->callee->decl)))
264 /* Don't reuse if more than one function shares a comdat group.
265 If the other function(s) are needed, we need to emit even
266 this function out of line. */
267 && !e->callee->same_comdat_group
268 && !cgraph_new_nodes)
270 gcc_assert (!e->callee->global.inlined_to);
271 if (e->callee->analyzed && !DECL_EXTERNAL (e->callee->decl))
273 overall_size -= e->callee->global.size;
274 nfunctions_inlined++;
276 duplicate = false;
277 e->callee->local.externally_visible = false;
278 update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
280 else
282 struct cgraph_node *n;
283 n = cgraph_clone_node (e->callee, e->callee->decl,
284 e->count, e->frequency, e->loop_nest,
285 update_original, NULL);
286 cgraph_redirect_edge_callee (e, n);
290 if (e->caller->global.inlined_to)
291 e->callee->global.inlined_to = e->caller->global.inlined_to;
292 else
293 e->callee->global.inlined_to = e->caller;
294 e->callee->global.stack_frame_offset
295 = e->caller->global.stack_frame_offset
296 + inline_summary (e->caller)->estimated_self_stack_size;
297 peak = e->callee->global.stack_frame_offset
298 + inline_summary (e->callee)->estimated_self_stack_size;
299 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
300 e->callee->global.inlined_to->global.estimated_stack_size = peak;
301 cgraph_propagate_frequency (e->callee);
303 /* Recursively clone all bodies. */
304 for (e = e->callee->callees; e; e = e->next_callee)
305 if (!e->inline_failed)
306 cgraph_clone_inlined_nodes (e, duplicate, update_original);
309 /* Mark edge E as inlined and update callgraph accordingly. UPDATE_ORIGINAL
310 specify whether profile of original function should be updated. If any new
311 indirect edges are discovered in the process, add them to NEW_EDGES, unless
312 it is NULL. Return true iff any new callgraph edges were discovered as a
313 result of inlining. */
315 static bool
316 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
317 VEC (cgraph_edge_p, heap) **new_edges)
319 int old_size = 0, new_size = 0;
320 struct cgraph_node *to = NULL, *what;
321 struct cgraph_edge *curr = e;
322 int freq;
324 gcc_assert (e->inline_failed);
325 e->inline_failed = CIF_OK;
326 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
328 cgraph_clone_inlined_nodes (e, true, update_original);
330 what = e->callee;
332 freq = e->frequency;
333 /* Now update size of caller and all functions caller is inlined into. */
334 for (;e && !e->inline_failed; e = e->caller->callers)
336 to = e->caller;
337 old_size = e->caller->global.size;
338 new_size = cgraph_estimate_size_after_inlining (1, to, what);
339 to->global.size = new_size;
340 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
342 gcc_assert (what->global.inlined_to == to);
343 if (new_size > old_size)
344 overall_size += new_size - old_size;
345 ncalls_inlined++;
347 /* FIXME: We should remove the optimize check after we ensure we never run
348      IPA passes when not optimizing. */
349 if (flag_indirect_inlining && optimize)
350 return ipa_propagate_indirect_call_infos (curr, new_edges);
351 else
352 return false;
355 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER. */
357 static void
358 cgraph_mark_inline (struct cgraph_edge *edge)
360 struct cgraph_node *to = edge->caller;
361 struct cgraph_node *what = edge->callee;
362 struct cgraph_edge *e, *next;
364 gcc_assert (!edge->call_stmt_cannot_inline_p);
365 /* Look for all calls, mark them inline and clone recursively
366 all inlined functions. */
367 for (e = what->callers; e; e = next)
369 next = e->next_caller;
370 if (e->caller == to && e->inline_failed)
372 cgraph_mark_inline_edge (e, true, NULL);
373 if (e == edge)
374 edge = next;
379 /* Estimate the growth caused by inlining NODE into all callees. */
381 static int
382 cgraph_estimate_growth (struct cgraph_node *node)
384 int growth = 0;
385 struct cgraph_edge *e;
386 bool self_recursive = false;
388 if (node->global.estimated_growth != INT_MIN)
389 return node->global.estimated_growth;
391 for (e = node->callers; e; e = e->next_caller)
393 if (e->caller == node)
394 self_recursive = true;
395 if (e->inline_failed)
396 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
397 - e->caller->global.size);
400   /* ??? Wrong for non-trivially self-recursive functions or cases where
401      we decide not to inline for other reasons, but it is not a big deal
402      as in that case we will keep the body around, but we will also avoid
403      some inlining. */
404 if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
405 && !DECL_EXTERNAL (node->decl) && !self_recursive)
406 growth -= node->global.size;
408 node->global.estimated_growth = growth;
409 return growth;
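
/* For example (illustrative numbers only): a static function of size 20
   with a size inlining benefit of 5 and three not-yet-inlined call sites
   is estimated to grow the unit by 3 * (20 - 5) = 45; if inlining all the
   calls allows the offline copy to be removed, its own size of 20 is
   subtracted, giving an estimated growth of 25.  */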
412 /* Return false when inlining WHAT into TO is not a good idea
413    as it would cause too large a growth of function bodies.
414    When ONE_ONLY is true, assume that only one call site is going
415    to be inlined; otherwise figure out how many call sites in
416    TO call WHAT and verify that all can be inlined.
419 static bool
420 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
421 cgraph_inline_failed_t *reason, bool one_only)
423 int times = 0;
424 struct cgraph_edge *e;
425 int newsize;
426 int limit;
427 HOST_WIDE_INT stack_size_limit, inlined_stack;
429 if (one_only)
430 times = 1;
431 else
432 for (e = to->callees; e; e = e->next_callee)
433 if (e->callee == what)
434 times++;
436 if (to->global.inlined_to)
437 to = to->global.inlined_to;
439   /* When inlining a large function body called once into a small function,
440      take the inlined function as the base for limiting the growth. */
441 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
442 limit = inline_summary (to)->self_size;
443 else
444 limit = inline_summary (what)->self_size;
446 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
448 /* Check the size after inlining against the function limits. But allow
449 the function to shrink if it went over the limits by forced inlining. */
450 newsize = cgraph_estimate_size_after_inlining (times, to, what);
451 if (newsize >= to->global.size
452 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
453 && newsize > limit)
455 if (reason)
456 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
457 return false;
460 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
462 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
464 inlined_stack = (to->global.stack_frame_offset
465 + inline_summary (to)->estimated_self_stack_size
466 + what->global.estimated_stack_size);
467 if (inlined_stack > stack_size_limit
468 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
470 if (reason)
471 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
472 return false;
474 return true;
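
/* For example (illustrative numbers only, no particular parameter defaults
   assumed): with self sizes of 200 for TO and 80 for WHAT and
   PARAM_LARGE_FUNCTION_GROWTH set to 100, the growth limit above is
   200 + 200 * 100 / 100 = 400; the inlining is rejected only when the
   estimated new size does not shrink TO, exceeds
   PARAM_LARGE_FUNCTION_INSNS and also exceeds this limit.  Stack growth is
   limited analogously via PARAM_STACK_FRAME_GROWTH and
   PARAM_LARGE_STACK_FRAME.  */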
477 /* Return true when function N is small enough to be inlined. */
479 static bool
480 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
482 tree decl = n->decl;
484 if (n->local.disregard_inline_limits)
485 return true;
487 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
489 if (reason)
490 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
491 return false;
493 if (!n->analyzed)
495 if (reason)
496 *reason = CIF_BODY_NOT_AVAILABLE;
497 return false;
499 if (cgraph_function_body_availability (n) <= AVAIL_OVERWRITABLE)
501 if (reason)
502 *reason = CIF_OVERWRITABLE;
503 return false;
507 if (DECL_DECLARED_INLINE_P (decl))
509 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
511 if (reason)
512 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
513 return false;
516 else
518 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
520 if (reason)
521 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
522 return false;
526 return true;
529 /* Return true when inlining WHAT would create recursive inlining.
530    We call recursive inlining all cases where the same function appears more
531    than once in a single recursion nest path in the inline graph. */
533 static inline bool
534 cgraph_recursive_inlining_p (struct cgraph_node *to,
535 struct cgraph_node *what,
536 cgraph_inline_failed_t *reason)
538 bool recursive;
539 if (to->global.inlined_to)
540 recursive = what->decl == to->global.inlined_to->decl;
541 else
542 recursive = what->decl == to->decl;
543   /* Marking a recursive function inline has sane semantics and thus we should
544      not warn on it. */
545 if (recursive && reason)
546 *reason = (what->local.disregard_inline_limits
547 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
548 return recursive;
551 /* A cost model driving the inlining heuristics in a way such that the edges
552    with the smallest badness are inlined first.  After each inlining is
553    performed, the costs of all caller edges of the affected nodes are
554    recomputed so the metrics may accurately depend on values such as the
555    number of inlinable callers of the function or the function body size. */
557 static int
558 cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
560 gcov_type badness;
561 int growth =
562 (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
563 - edge->caller->global.size);
565 if (edge->callee->local.disregard_inline_limits)
566 return INT_MIN;
568 if (dump)
570       fprintf (dump_file, "    Badness calculation for %s -> %s\n",
571 cgraph_node_name (edge->caller),
572 cgraph_node_name (edge->callee));
573 fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
574 growth,
575 edge->callee->global.time,
576 inline_summary (edge->callee)->time_inlining_benefit,
577 edge->callee->global.size,
578 inline_summary (edge->callee)->size_inlining_benefit);
581 /* Always prefer inlining saving code size. */
582 if (growth <= 0)
584 badness = INT_MIN - growth;
585 if (dump)
586 fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
587 growth);
590 /* When profiling is available, base priorities -(#calls / growth).
591 So we optimize for overall number of "executed" inlined calls. */
592 else if (max_count)
594 badness =
595 ((int)
596 ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
597 (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
598 if (dump)
600 fprintf (dump_file,
601 " %i (relative %f): profile info. Relative count %f"
602 " * Relative benefit %f\n",
603 (int) badness, (double) badness / INT_MIN,
604 (double) edge->count / max_count,
605 (double) (inline_summary (edge->callee)->
606 time_inlining_benefit + 1) / (max_benefit + 1));
610   /* When a function-local profile is available, base priorities on
611      growth / frequency, so we optimize for the overall frequency of inlined
612      calls.  This is not too accurate since, while the call might be frequent
613      within the function, the function itself may be infrequent.
615      Another objective to optimize for is the number of different calls inlined.
616      We add the estimated growth after inlining all functions to bias the
617      priorities slightly in this direction (so less frequently called functions
618      of the same size get priority). */
619 else if (flag_guess_branch_prob)
621 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
622 int benefitperc;
623 int growth_for_all;
624 badness = growth * 10000;
625 benefitperc =
626 MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
627 (edge->callee->global.time + 1) +1, 100);
628 div *= benefitperc;
631 /* Decrease badness if call is nested. */
632 /* Compress the range so we don't overflow. */
633 if (div > 10000)
634 div = 10000 + ceil_log2 (div) - 8;
635 if (div < 1)
636 div = 1;
637 if (badness > 0)
638 badness /= div;
639 growth_for_all = cgraph_estimate_growth (edge->callee);
640 badness += growth_for_all;
641 if (badness > INT_MAX)
642 badness = INT_MAX;
643 if (dump)
645 fprintf (dump_file,
646 " %i: guessed profile. frequency %i, overall growth %i,"
647 " benefit %i%%, divisor %i\n",
648 (int) badness, edge->frequency, growth_for_all, benefitperc, div);
651   /* When the function-local profile is not available or does not give
652      useful information (i.e. the frequency is zero), base the cost on
653      loop nest and overall size growth, so we optimize for the overall number
654      of functions fully inlined in the program. */
655 else
657 int nest = MIN (edge->loop_nest, 8);
658 badness = cgraph_estimate_growth (edge->callee) * 256;
660 /* Decrease badness if call is nested. */
661 if (badness > 0)
662 badness >>= nest;
663 else
665 badness <<= nest;
667 if (dump)
668 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
669 nest);
672 /* Ensure that we did not overflow in all the fixed point math above. */
673 gcc_assert (badness >= INT_MIN);
674 gcc_assert (badness <= INT_MAX - 1);
675 /* Make recursive inlining happen always after other inlining is done. */
676 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
677 return badness + 1;
678 else
679 return badness;
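
/* A worked example for the guessed-profile branch above (illustrative
   numbers only): with growth 40, a call frequency equal to
   CGRAPH_FREQ_BASE and a time inlining benefit of roughly half the
   callee's time, div = (100 + 1) * 50 = 5050 and
   badness = 40 * 10000 / 5050 = 79, to which the callee's overall
   estimated growth is then added.  Smaller badness means the edge is
   inlined earlier.  */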
682 /* Recompute badness of EDGE and update its key in HEAP if needed. */
683 static void
684 update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
686 int badness = cgraph_edge_badness (edge, false);
687 if (edge->aux)
689 fibnode_t n = (fibnode_t) edge->aux;
690 gcc_checking_assert (n->data == edge);
692       /* fibheap_replace_key only decreases the keys.
693 	 When we increase the key we do not update the heap
694 	 and instead re-insert the element once it becomes
695 	 a minimum of the heap. */
696 if (badness < n->key)
698 fibheap_replace_key (heap, n, badness);
699 gcc_checking_assert (n->key == badness);
702 else
703 edge->aux = fibheap_insert (heap, badness, edge);
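
/* Illustrative trace (hypothetical values): an edge sits in the heap with
   key 10 and its recomputed badness becomes 25.  The key is left at 10,
   so the edge is extracted early; the main loop in
   cgraph_decide_inlining_of_small_functions then notices that the cached
   key no longer matches cgraph_edge_badness and simply re-inserts the
   edge with its new badness of 25.  */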
706 /* Recompute heap nodes for each caller edge of NODE. */
708 static void
709 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
710 bitmap updated_nodes)
712 struct cgraph_edge *edge;
713 cgraph_inline_failed_t failed_reason;
715 if (!node->local.inlinable
716 || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
717 || node->global.inlined_to)
718 return;
719 if (!bitmap_set_bit (updated_nodes, node->uid))
720 return;
721 node->global.estimated_growth = INT_MIN;
723 /* See if there is something to do. */
724 for (edge = node->callers; edge; edge = edge->next_caller)
725 if (edge->inline_failed)
726 break;
727 if (!edge)
728 return;
729 /* Prune out edges we won't inline into anymore. */
730 if (!cgraph_default_inline_p (node, &failed_reason))
732 for (; edge; edge = edge->next_caller)
733 if (edge->aux)
735 fibheap_delete_node (heap, (fibnode_t) edge->aux);
736 edge->aux = NULL;
737 if (edge->inline_failed)
738 edge->inline_failed = failed_reason;
740 return;
743 for (; edge; edge = edge->next_caller)
744 if (edge->inline_failed)
745 update_edge_key (heap, edge);
748 /* Recompute heap nodes for each uninlined call.
749    This is used when we know that edge badnesses are only going to increase
750    (we introduced a new call site) and thus all we need is to insert newly
751    created edges into the heap. */
753 static void
754 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
755 bitmap updated_nodes)
757 struct cgraph_edge *e = node->callees;
758 node->global.estimated_growth = INT_MIN;
760 if (!e)
761 return;
762 while (true)
763 if (!e->inline_failed && e->callee->callees)
764 e = e->callee->callees;
765 else
767 if (e->inline_failed
768 && e->callee->local.inlinable
769 && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
770 && !bitmap_bit_p (updated_nodes, e->callee->uid))
772 node->global.estimated_growth = INT_MIN;
773 /* If function becomes uninlinable, we need to remove it from the heap. */
774 if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
775 update_caller_keys (heap, e->callee, updated_nodes);
776 else
777 /* Otherwise update just edge E. */
778 update_edge_key (heap, e);
780 if (e->next_callee)
781 e = e->next_callee;
782 else
786 if (e->caller == node)
787 return;
788 e = e->caller->callers;
790 while (!e->next_callee);
791 e = e->next_callee;
796 /* Recompute heap nodes for the caller edges of each of the callees.
797    Walk recursively into all inline clones. */
799 static void
800 update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
801 bitmap updated_nodes)
803 struct cgraph_edge *e = node->callees;
804 node->global.estimated_growth = INT_MIN;
806 if (!e)
807 return;
808 while (true)
809 if (!e->inline_failed && e->callee->callees)
810 e = e->callee->callees;
811 else
813 if (e->inline_failed)
814 update_caller_keys (heap, e->callee, updated_nodes);
815 if (e->next_callee)
816 e = e->next_callee;
817 else
821 if (e->caller == node)
822 return;
823 e = e->caller->callers;
825 while (!e->next_callee);
826 e = e->next_callee;
831 /* Enqueue all recursive calls from NODE into the priority queue, ordered by
832    how much we want to recursively inline the call. */
834 static void
835 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
836 fibheap_t heap)
838 static int priority;
839 struct cgraph_edge *e;
840 for (e = where->callees; e; e = e->next_callee)
841 if (e->callee == node)
843       /* When profile feedback is available, prioritize by the expected number
844 	 of calls.  Without profile feedback we maintain a simple queue
845 	 to order candidates by recursion depth. */
846 fibheap_insert (heap,
847 !max_count ? priority++
848 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
851 for (e = where->callees; e; e = e->next_callee)
852 if (!e->inline_failed)
853 lookup_recursive_calls (node, e->callee, heap);
856 /* Decide on recursive inlining: in case the function has recursive calls,
857    inline until the body size reaches the given limit.  If any new indirect edges
858    are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
859    is NULL. */
861 static bool
862 cgraph_decide_recursive_inlining (struct cgraph_node *node,
863 VEC (cgraph_edge_p, heap) **new_edges)
865 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
866 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
867 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
868 fibheap_t heap;
869 struct cgraph_edge *e;
870 struct cgraph_node *master_clone, *next;
871 int depth = 0;
872 int n = 0;
874 /* It does not make sense to recursively inline always-inline functions
875 as we are going to sorry() on the remaining calls anyway. */
876 if (node->local.disregard_inline_limits
877 && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
878 return false;
880 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
881 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
882 return false;
884 if (DECL_DECLARED_INLINE_P (node->decl))
886 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
887 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
890 /* Make sure that function is small enough to be considered for inlining. */
891 if (!max_depth
892 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
893 return false;
894 heap = fibheap_new ();
895 lookup_recursive_calls (node, node, heap);
896 if (fibheap_empty (heap))
898 fibheap_delete (heap);
899 return false;
902 if (dump_file)
903 fprintf (dump_file,
904 " Performing recursive inlining on %s\n",
905 cgraph_node_name (node));
907 /* We need original clone to copy around. */
908 master_clone = cgraph_clone_node (node, node->decl,
909 node->count, CGRAPH_FREQ_BASE, 1,
910 false, NULL);
911 master_clone->needed = true;
912 for (e = master_clone->callees; e; e = e->next_callee)
913 if (!e->inline_failed)
914 cgraph_clone_inlined_nodes (e, true, false);
916   /* Do the inlining and update the list of recursive calls during the process. */
917 while (!fibheap_empty (heap)
918 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
919 <= limit))
921 struct cgraph_edge *curr
922 = (struct cgraph_edge *) fibheap_extract_min (heap);
923 struct cgraph_node *cnode;
925 depth = 1;
926 for (cnode = curr->caller;
927 cnode->global.inlined_to; cnode = cnode->callers->caller)
928 if (node->decl == curr->callee->decl)
929 depth++;
930 if (depth > max_depth)
932 if (dump_file)
933 fprintf (dump_file,
934 " maximal depth reached\n");
935 continue;
938 if (max_count)
940 if (!cgraph_maybe_hot_edge_p (curr))
942 if (dump_file)
943 fprintf (dump_file, " Not inlining cold call\n");
944 continue;
946 if (curr->count * 100 / node->count < probability)
948 if (dump_file)
949 fprintf (dump_file,
950 " Probability of edge is too small\n");
951 continue;
955 if (dump_file)
957 fprintf (dump_file,
958 " Inlining call of depth %i", depth);
959 if (node->count)
961 fprintf (dump_file, " called approx. %.2f times per call",
962 (double)curr->count / node->count);
964 fprintf (dump_file, "\n");
966 cgraph_redirect_edge_callee (curr, master_clone);
967 cgraph_mark_inline_edge (curr, false, new_edges);
968 lookup_recursive_calls (node, curr->callee, heap);
969 n++;
971 if (!fibheap_empty (heap) && dump_file)
972 fprintf (dump_file, " Recursive inlining growth limit met.\n");
974 fibheap_delete (heap);
975 if (dump_file)
976 fprintf (dump_file,
977 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
978 master_clone->global.size, node->global.size,
979 master_clone->global.time, node->global.time);
981   /* Remove the master clone we used for inlining.  We rely on the fact that
982      clones inlined into the master clone get queued just before the master
983      clone, so we don't need recursion. */
984 for (node = cgraph_nodes; node != master_clone;
985 node = next)
987 next = node->next;
988 if (node->global.inlined_to == master_clone)
989 cgraph_remove_node (node);
991 cgraph_remove_node (master_clone);
992   /* FIXME: Recursive inlining actually reduces the number of calls to the
993      function.  At this point we should probably walk the function and its
994      inline clones and compensate the counts accordingly.  This probably
995      doesn't matter much in practice. */
996 return n > 0;
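
/* For example (illustrative numbers only): for a self-recursive function
   of size 40 that is not declared inline, with
   PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO set to 200, the loop above keeps
   peeling recursive calls into the master clone while
   cgraph_estimate_size_after_inlining (1, node, master_clone) stays
   within 200 and the recursion depth stays within
   PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO.  */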
999 /* Set inline_failed for all callers of given function to REASON. */
1001 static void
1002 cgraph_set_inline_failed (struct cgraph_node *node,
1003 cgraph_inline_failed_t reason)
1005 struct cgraph_edge *e;
1007 if (dump_file)
1008 fprintf (dump_file, "Inlining failed: %s\n",
1009 cgraph_inline_failed_string (reason));
1010 for (e = node->callers; e; e = e->next_caller)
1011 if (e->inline_failed)
1012 e->inline_failed = reason;
1015 /* Given a whole-compilation-unit size estimate of INSNS, compute how large
1016    we can allow the unit to grow. */
1017 static int
1018 compute_max_insns (int insns)
1020 int max_insns = insns;
1021 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1022 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1024 return ((HOST_WIDEST_INT) max_insns
1025 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
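
/* For example (illustrative numbers only): with a unit size of 20000
   instructions and PARAM_INLINE_UNIT_GROWTH set to 30, the unit may grow
   up to 20000 * 130 / 100 = 26000 instructions; units smaller than
   PARAM_LARGE_UNIT_INSNS use that parameter as the base instead.  */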
1028 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1029 static void
1030 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
1032 while (VEC_length (cgraph_edge_p, new_edges) > 0)
1034 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
1036 gcc_assert (!edge->aux);
1037 if (edge->callee->local.inlinable
1038 && edge->inline_failed
1039 && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1040 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1045 /* We use a greedy algorithm for inlining small functions:
1046    all inline candidates are put into a prioritized heap based on the estimated
1047    growth of the overall number of instructions; the estimates are then updated.
1049    INLINED and INLINED_CALLEES are just pointers to arrays large enough
1050    to be passed to cgraph_inlined_into and cgraph_inlined_callees. */
1052 static void
1053 cgraph_decide_inlining_of_small_functions (void)
1055 struct cgraph_node *node;
1056 struct cgraph_edge *edge;
1057 cgraph_inline_failed_t failed_reason;
1058 fibheap_t heap = fibheap_new ();
1059 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1060 int min_size, max_size;
1061 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
1063 if (flag_indirect_inlining)
1064 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
1066 if (dump_file)
1067 fprintf (dump_file, "\nDeciding on smaller functions:\n");
1069 /* Put all inline candidates into the heap. */
1071 for (node = cgraph_nodes; node; node = node->next)
1073 if (!node->local.inlinable || !node->callers)
1074 continue;
1075 if (dump_file)
1076 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
1078 node->global.estimated_growth = INT_MIN;
1079 if (!cgraph_default_inline_p (node, &failed_reason))
1081 cgraph_set_inline_failed (node, failed_reason);
1082 continue;
1085 for (edge = node->callers; edge; edge = edge->next_caller)
1086 if (edge->inline_failed)
1088 gcc_assert (!edge->aux);
1089 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1093 max_size = compute_max_insns (overall_size);
1094 min_size = overall_size;
1096 while (overall_size <= max_size
1097 && !fibheap_empty (heap))
1099 int old_size = overall_size;
1100 struct cgraph_node *where, *callee;
1101 int badness = fibheap_min_key (heap);
1102 int current_badness;
1103 int growth;
1104 cgraph_inline_failed_t not_good = CIF_OK;
1106 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1107 gcc_assert (edge->aux);
1108 edge->aux = NULL;
1109 if (!edge->inline_failed)
1110 continue;
1112       /* When updating the edge costs, we only decrease badness in the keys.
1113 	 When the badness increases, we keep the heap as it is and re-insert
1114 	 the key now. */
1115 current_badness = cgraph_edge_badness (edge, false);
1116 gcc_assert (current_badness >= badness);
1117 if (current_badness != badness)
1119 edge->aux = fibheap_insert (heap, current_badness, edge);
1120 continue;
1123 callee = edge->callee;
1125 growth = (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
1126 - edge->caller->global.size);
1128 if (dump_file)
1130 fprintf (dump_file,
1131 "\nConsidering %s with %i size\n",
1132 cgraph_node_name (edge->callee),
1133 edge->callee->global.size);
1134 fprintf (dump_file,
1135 " to be inlined into %s in %s:%i\n"
1136 " Estimated growth after inlined into all callees is %+i insns.\n"
1137 " Estimated badness is %i, frequency %.2f.\n",
1138 cgraph_node_name (edge->caller),
1139 flag_wpa ? "unknown"
1140 : gimple_filename ((const_gimple) edge->call_stmt),
1141 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1142 cgraph_estimate_growth (edge->callee),
1143 badness,
1144 edge->frequency / (double)CGRAPH_FREQ_BASE);
1145 if (edge->count)
1146 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1147 if (dump_flags & TDF_DETAILS)
1148 cgraph_edge_badness (edge, true);
1151       /* When we do not have profile info ready, we do not weight in any way
1152 	 the position of the call in the procedure itself.  This means that if a call of
1153 	 function A from function B seems profitable to inline, the recursive
1154 	 call of function A in the inline copy of A in B will look profitable too,
1155 	 and we end up inlining until reaching the maximal function growth.  This
1156 	 is not a good idea, so prohibit the recursive inlining.
1158 	 ??? When the frequencies are taken into account we might not need this
1159 	 restriction.
1161 	 We need to be careful here; in some testcases, e.g. directivec.c in
1162 	 libcpp, we can estimate a self-recursive function to have negative growth
1163 	 for inlining completely.
1165 if (!edge->count)
1167 where = edge->caller;
1168 while (where->global.inlined_to)
1170 if (where->decl == edge->callee->decl)
1171 break;
1172 where = where->callers->caller;
1174 if (where->global.inlined_to)
1176 edge->inline_failed
1177 = (edge->callee->local.disregard_inline_limits
1178 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1179 if (dump_file)
1180 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
1181 continue;
1185 if (edge->callee->local.disregard_inline_limits)
1187 else if (!cgraph_maybe_hot_edge_p (edge))
1188 not_good = CIF_UNLIKELY_CALL;
1189 else if (!flag_inline_functions
1190 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
1191 not_good = CIF_NOT_DECLARED_INLINED;
1192 else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
1193 not_good = CIF_OPTIMIZING_FOR_SIZE;
1194 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
1196 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1197 &edge->inline_failed))
1199 edge->inline_failed = not_good;
1200 if (dump_file)
1201 fprintf (dump_file, " inline_failed:%s.\n",
1202 cgraph_inline_failed_string (edge->inline_failed));
1204 continue;
1206 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1208 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1209 &edge->inline_failed))
1211 if (dump_file)
1212 fprintf (dump_file, " inline_failed:%s.\n",
1213 cgraph_inline_failed_string (edge->inline_failed));
1215 continue;
1217 if (!tree_can_inline_p (edge))
1219 if (dump_file)
1220 fprintf (dump_file, " inline_failed:%s.\n",
1221 cgraph_inline_failed_string (edge->inline_failed));
1222 continue;
1224 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1225 &edge->inline_failed))
1227 where = edge->caller;
1228 if (where->global.inlined_to)
1229 where = where->global.inlined_to;
1230 if (!cgraph_decide_recursive_inlining (where,
1231 flag_indirect_inlining
1232 ? &new_indirect_edges : NULL))
1233 continue;
1234 if (flag_indirect_inlining)
1235 add_new_edges_to_heap (heap, new_indirect_edges);
1236 update_all_callee_keys (heap, where, updated_nodes);
1238 else
1240 struct cgraph_node *callee;
1241 if (edge->call_stmt_cannot_inline_p
1242 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1243 &edge->inline_failed, true))
1245 if (dump_file)
1246 fprintf (dump_file, " Not inlining into %s:%s.\n",
1247 cgraph_node_name (edge->caller),
1248 cgraph_inline_failed_string (edge->inline_failed));
1249 continue;
1251 callee = edge->callee;
1252 gcc_checking_assert (!callee->global.inlined_to);
1253 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1254 if (flag_indirect_inlining)
1255 add_new_edges_to_heap (heap, new_indirect_edges);
1257 	  /* We inlined the last offline copy into the body.  This might lead
1258 	     to callees of the function having fewer call sites, and thus they
1259 	     may need updating. */
1260 if (callee->global.inlined_to)
1261 update_all_callee_keys (heap, callee, updated_nodes);
1262 else
1263 update_callee_keys (heap, edge->callee, updated_nodes);
1265 where = edge->caller;
1266 if (where->global.inlined_to)
1267 where = where->global.inlined_to;
1269 	  /* Our profitability metric can depend on local properties
1270 	     such as the number of inlinable calls and the size of the function body.
1271 	     After inlining these properties might change for the function we
1272 	     inlined into (since its body size changed) and for the functions
1273 	     called by the function we inlined (since the number of its inlinable
1274 	     callers might change). */
1275 update_caller_keys (heap, where, updated_nodes);
1277 /* We removed one call of the function we just inlined. If offline
1278 copy is still needed, be sure to update the keys. */
1279 if (callee != where && !callee->global.inlined_to)
1280 update_caller_keys (heap, callee, updated_nodes);
1281 bitmap_clear (updated_nodes);
1283 if (dump_file)
1285 fprintf (dump_file,
1286 " Inlined into %s which now has size %i and self time %i,"
1287 "net change of %+i.\n",
1288 cgraph_node_name (edge->caller),
1289 edge->caller->global.time,
1290 edge->caller->global.size,
1291 overall_size - old_size);
1293 if (min_size > overall_size)
1295 min_size = overall_size;
1296 max_size = compute_max_insns (min_size);
1298 if (dump_file)
1299 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1302 while (!fibheap_empty (heap))
1304 int badness = fibheap_min_key (heap);
1306 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1307 gcc_assert (edge->aux);
1308 edge->aux = NULL;
1309 if (!edge->inline_failed)
1310 continue;
1311 #ifdef ENABLE_CHECKING
1312 gcc_assert (cgraph_edge_badness (edge, false) >= badness);
1313 #endif
1314 if (dump_file)
1316 fprintf (dump_file,
1317 "\nSkipping %s with %i size\n",
1318 cgraph_node_name (edge->callee),
1319 edge->callee->global.size);
1320 fprintf (dump_file,
1321 " called by %s in %s:%i\n"
1322 " Estimated growth after inlined into all callees is %+i insns.\n"
1323 " Estimated badness is %i, frequency %.2f.\n",
1324 cgraph_node_name (edge->caller),
1325 flag_wpa ? "unknown"
1326 : gimple_filename ((const_gimple) edge->call_stmt),
1327 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1328 cgraph_estimate_growth (edge->callee),
1329 badness,
1330 edge->frequency / (double)CGRAPH_FREQ_BASE);
1331 if (edge->count)
1332 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1333 if (dump_flags & TDF_DETAILS)
1334 cgraph_edge_badness (edge, true);
1336 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1337 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1338 &edge->inline_failed))
1339 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1342 if (new_indirect_edges)
1343 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1344 fibheap_delete (heap);
1345 BITMAP_FREE (updated_nodes);
1348 /* Flatten NODE from the IPA inliner. */
1350 static void
1351 cgraph_flatten (struct cgraph_node *node)
1353 struct cgraph_edge *e;
1355 /* We shouldn't be called recursively when we are being processed. */
1356 gcc_assert (node->aux == NULL);
1358 node->aux = (void *)(size_t) INLINE_ALL;
1360 for (e = node->callees; e; e = e->next_callee)
1362 struct cgraph_node *orig_callee;
1364 if (e->call_stmt_cannot_inline_p)
1365 continue;
1367 if (!e->callee->analyzed)
1369 if (dump_file)
1370 fprintf (dump_file,
1371 "Not inlining: Function body not available.\n");
1372 continue;
1375       /* We've hit a cycle?  It is time to give up. */
1376 if (e->callee->aux)
1378 if (dump_file)
1379 fprintf (dump_file,
1380 "Not inlining %s into %s to avoid cycle.\n",
1381 cgraph_node_name (e->callee),
1382 cgraph_node_name (e->caller));
1383 e->inline_failed = CIF_RECURSIVE_INLINING;
1384 continue;
1387 /* When the edge is already inlined, we just need to recurse into
1388 it in order to fully flatten the leaves. */
1389 if (!e->inline_failed)
1391 cgraph_flatten (e->callee);
1392 continue;
1395 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1397 if (dump_file)
1398 fprintf (dump_file, "Not inlining: recursive call.\n");
1399 continue;
1402 if (!tree_can_inline_p (e))
1404 if (dump_file)
1405 fprintf (dump_file, "Not inlining: %s",
1406 cgraph_inline_failed_string (e->inline_failed));
1407 continue;
1410 /* Inline the edge and flatten the inline clone. Avoid
1411 recursing through the original node if the node was cloned. */
1412 if (dump_file)
1413 fprintf (dump_file, " Inlining %s into %s.\n",
1414 cgraph_node_name (e->callee),
1415 cgraph_node_name (e->caller));
1416 orig_callee = e->callee;
1417 cgraph_mark_inline_edge (e, true, NULL);
1418 if (e->callee != orig_callee)
1419 orig_callee->aux = (void *)(size_t) INLINE_ALL;
1420 cgraph_flatten (e->callee);
1421 if (e->callee != orig_callee)
1422 orig_callee->aux = NULL;
1425 node->aux = NULL;
1428 /* Decide on the inlining.  We do so in topological order to avoid
1429    the expense of updating data structures. */
1431 static unsigned int
1432 cgraph_decide_inlining (void)
1434 struct cgraph_node *node;
1435 int nnodes;
1436 struct cgraph_node **order =
1437 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1438 int old_size = 0;
1439 int i;
1440 int initial_size = 0;
1442 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1443 if (in_lto_p && flag_indirect_inlining)
1444 ipa_update_after_lto_read ();
1445 if (flag_indirect_inlining)
1446 ipa_create_all_structures_for_iinln ();
1448 max_count = 0;
1449 max_benefit = 0;
1450 for (node = cgraph_nodes; node; node = node->next)
1451 if (node->analyzed)
1453 struct cgraph_edge *e;
1455 gcc_assert (inline_summary (node)->self_size == node->global.size);
1456 if (!DECL_EXTERNAL (node->decl))
1457 initial_size += node->global.size;
1458 for (e = node->callees; e; e = e->next_callee)
1459 if (max_count < e->count)
1460 max_count = e->count;
1461 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1462 max_benefit = inline_summary (node)->time_inlining_benefit;
1464 gcc_assert (in_lto_p
1465 || !max_count
1466 || (profile_info && flag_branch_probabilities));
1467 overall_size = initial_size;
1469 nnodes = cgraph_postorder (order);
1471 if (dump_file)
1472 fprintf (dump_file,
1473 "\nDeciding on inlining. Starting with size %i.\n",
1474 initial_size);
1476 for (node = cgraph_nodes; node; node = node->next)
1477 node->aux = 0;
1479 if (dump_file)
1480 fprintf (dump_file, "\nFlattening functions:\n");
1482 /* In the first pass handle functions to be flattened. Do this with
1483 a priority so none of our later choices will make this impossible. */
1484 for (i = nnodes - 1; i >= 0; i--)
1486 node = order[i];
1488       /* Handle nodes to be flattened, but don't update overall unit
1489 	 size.  Calling the incremental inliner here is lame;
1490 	 a simple worklist should be enough.  What should be left
1491 	 here from the early inliner (if it runs) is the cyclic cases.
1492 	 Ideally, when processing callees we would stop inlining at the
1493 	 entry of cycles, possibly cloning that entry point and
1494 	 trying to flatten it, turning it into a self-recursive
1495 	 function. */
1496 if (lookup_attribute ("flatten",
1497 DECL_ATTRIBUTES (node->decl)) != NULL)
1499 if (dump_file)
1500 fprintf (dump_file,
1501 "Flattening %s\n", cgraph_node_name (node));
1502 cgraph_flatten (node);
1506 cgraph_decide_inlining_of_small_functions ();
1508 if (flag_inline_functions_called_once)
1510 if (dump_file)
1511 fprintf (dump_file, "\nDeciding on functions called once:\n");
1513 /* And finally decide what functions are called once. */
1514 for (i = nnodes - 1; i >= 0; i--)
1516 node = order[i];
1518 if (node->callers
1519 && !node->callers->next_caller
1520 && !node->global.inlined_to
1521 && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
1522 && node->local.inlinable
1523 && cgraph_function_body_availability (node) >= AVAIL_AVAILABLE
1524 && node->callers->inline_failed
1525 && node->callers->caller != node
1526 && node->callers->caller->global.inlined_to != node
1527 && !node->callers->call_stmt_cannot_inline_p
1528 && !DECL_EXTERNAL (node->decl))
1530 cgraph_inline_failed_t reason;
1531 old_size = overall_size;
1532 if (dump_file)
1534 fprintf (dump_file,
1535 "\nConsidering %s size %i.\n",
1536 cgraph_node_name (node), node->global.size);
1537 fprintf (dump_file,
1538 " Called once from %s %i insns.\n",
1539 cgraph_node_name (node->callers->caller),
1540 node->callers->caller->global.size);
1543 if (cgraph_check_inline_limits (node->callers->caller, node,
1544 &reason, false))
1546 struct cgraph_node *caller = node->callers->caller;
1547 cgraph_mark_inline (node->callers);
1548 if (dump_file)
1549 fprintf (dump_file,
1550 " Inlined into %s which now has %i size"
1551 " for a net change of %+i size.\n",
1552 cgraph_node_name (caller),
1553 caller->global.size,
1554 overall_size - old_size);
1556 else
1558 if (dump_file)
1559 fprintf (dump_file,
1560 " Not inlining: %s.\n",
1561 cgraph_inline_failed_string (reason));
1567 /* Free ipa-prop structures if they are no longer needed. */
1568 if (flag_indirect_inlining)
1569 ipa_free_all_structures_after_iinln ();
1571 if (dump_file)
1572 fprintf (dump_file,
1573 "\nInlined %i calls, eliminated %i functions, "
1574 "size %i turned to %i size.\n\n",
1575 ncalls_inlined, nfunctions_inlined, initial_size,
1576 overall_size);
1577 free (order);
1578 return 0;
1581 /* Return true when N is a leaf function.  Accept cheap (pure & const)
1582    builtins in leaf functions. */
1583 static bool
1584 leaf_node_p (struct cgraph_node *n)
1586 struct cgraph_edge *e;
1587 for (e = n->callees; e; e = e->next_callee)
1588 if (!DECL_BUILT_IN (e->callee->decl)
1589 || (!TREE_READONLY (e->callee->decl)
1590 || DECL_PURE_P (e->callee->decl)))
1591 return false;
1592 return true;
1595 /* Decide on inlining of calls in NODE according to the incremental inlining
1596    MODE; used by the early inliner. */
1598 static bool
1599 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1600 enum inlining_mode mode)
1602 struct cgraph_edge *e;
1603 bool inlined = false;
1604 cgraph_inline_failed_t failed_reason;
1606 #ifdef ENABLE_CHECKING
1607 verify_cgraph_node (node);
1608 #endif
1610 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1611 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1613 if (dump_file)
1614 fprintf (dump_file, "Incrementally flattening %s\n",
1615 cgraph_node_name (node));
1616 mode = INLINE_ALL;
1619 /* First of all look for always inline functions. */
1620 if (mode != INLINE_SIZE_NORECURSIVE)
1621 for (e = node->callees; e; e = e->next_callee)
1623 if (!e->callee->local.disregard_inline_limits
1624 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1625 continue;
1626 if (e->call_stmt_cannot_inline_p)
1627 continue;
1628 if (dump_file)
1629 fprintf (dump_file,
1630 "Considering to always inline inline candidate %s.\n",
1631 cgraph_node_name (e->callee));
1632 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1634 if (dump_file)
1635 fprintf (dump_file, "Not inlining: recursive call.\n");
1636 continue;
1638 if (!tree_can_inline_p (e))
1640 if (dump_file)
1641 fprintf (dump_file,
1642 "Not inlining: %s",
1643 cgraph_inline_failed_string (e->inline_failed));
1644 continue;
1646 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1647 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1649 if (dump_file)
1650 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1651 continue;
1653 if (!e->callee->analyzed)
1655 if (dump_file)
1656 fprintf (dump_file,
1657 "Not inlining: Function body no longer available.\n");
1658 continue;
1661 if (dump_file)
1662 fprintf (dump_file, " Inlining %s into %s.\n",
1663 cgraph_node_name (e->callee),
1664 cgraph_node_name (e->caller));
1665 cgraph_mark_inline (e);
1666 inlined = true;
1669 /* Now do the automatic inlining. */
1670 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
1671 /* Never inline regular functions into always-inline functions
1672 during incremental inlining. */
1673 && !node->local.disregard_inline_limits)
1675 bitmap visited = BITMAP_ALLOC (NULL);
1676 for (e = node->callees; e; e = e->next_callee)
1678 int allowed_growth = 0;
1679 if (!e->callee->local.inlinable
1680 || !e->inline_failed
1681 || e->callee->local.disregard_inline_limits)
1682 continue;
1683 /* We are inlining a function to all call-sites in node
1684 or to none. So visit each candidate only once. */
1685 if (!bitmap_set_bit (visited, e->callee->uid))
1686 continue;
1687 if (dump_file)
1688 fprintf (dump_file, "Considering inline candidate %s.\n",
1689 cgraph_node_name (e->callee));
1690 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1692 if (dump_file)
1693 fprintf (dump_file, "Not inlining: recursive call.\n");
1694 continue;
1696 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1697 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1699 if (dump_file)
1700 fprintf (dump_file,
1701 "Not inlining: SSA form does not match.\n");
1702 continue;
1705 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1706 && optimize_function_for_speed_p (cfun))
1707 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1709 /* When the function body would grow and inlining the function
1710 won't eliminate the need for offline copy of the function,
1711 don't inline. */
1712 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1713 || (!flag_inline_functions
1714 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1715 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1716 > e->caller->global.size + allowed_growth)
1717 && cgraph_estimate_growth (e->callee) > allowed_growth)
1719 if (dump_file)
1720 fprintf (dump_file,
1721 "Not inlining: code size would grow by %i.\n",
1722 cgraph_estimate_size_after_inlining (1, e->caller,
1723 e->callee)
1724 - e->caller->global.size);
1725 continue;
1727 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1728 false)
1729 || e->call_stmt_cannot_inline_p)
1731 if (dump_file)
1732 fprintf (dump_file, "Not inlining: %s.\n",
1733 cgraph_inline_failed_string (e->inline_failed));
1734 continue;
1736 if (!e->callee->analyzed)
1738 if (dump_file)
1739 fprintf (dump_file,
1740 "Not inlining: Function body no longer available.\n");
1741 continue;
1743 if (!tree_can_inline_p (e))
1745 if (dump_file)
1746 fprintf (dump_file,
1747 "Not inlining: %s.",
1748 cgraph_inline_failed_string (e->inline_failed));
1749 continue;
1751 if (cgraph_default_inline_p (e->callee, &failed_reason))
1753 if (dump_file)
1754 fprintf (dump_file, " Inlining %s into %s.\n",
1755 cgraph_node_name (e->callee),
1756 cgraph_node_name (e->caller));
1757 cgraph_mark_inline (e);
1758 inlined = true;
1761 BITMAP_FREE (visited);
1763 return inlined;
1766 /* Because inlining might remove no-longer reachable nodes, we need to
1767    keep the array visible to the garbage collector to avoid reading
1768    collected-out nodes. */
1769 static int nnodes;
1770 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1772 /* Do inlining of small functions.  Doing so early helps profiling and other
1773    passes to be somewhat more effective and avoids some code duplication in
1774    the later real inlining pass for testcases with very many function calls. */
1775 static unsigned int
1776 cgraph_early_inlining (void)
1778 struct cgraph_node *node = cgraph_node (current_function_decl);
1779 unsigned int todo = 0;
1780 int iterations = 0;
1782 if (seen_error ())
1783 return 0;
1785 if (!optimize
1786 || flag_no_inline
1787 || !flag_early_inlining)
1789       /* When not optimizing or not inlining, inline only always-inline
1790 	 functions. */
1791 cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
1792 timevar_push (TV_INTEGRATION);
1793 todo |= optimize_inline_calls (current_function_decl);
1794 timevar_pop (TV_INTEGRATION);
1796 else
1798 if (lookup_attribute ("flatten",
1799 DECL_ATTRIBUTES (node->decl)) != NULL)
1801 if (dump_file)
1802 fprintf (dump_file,
1803 "Flattening %s\n", cgraph_node_name (node));
1804 cgraph_flatten (node);
1805 timevar_push (TV_INTEGRATION);
1806 todo |= optimize_inline_calls (current_function_decl);
1807 timevar_pop (TV_INTEGRATION);
1809 /* We iterate incremental inlining to get trivial cases of indirect
1810 inlining. */
1811 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1812 && cgraph_decide_inlining_incrementally (node,
1813 iterations
1814 ? INLINE_SIZE_NORECURSIVE
1815 : INLINE_SIZE))
1817 timevar_push (TV_INTEGRATION);
1818 todo |= optimize_inline_calls (current_function_decl);
1819 iterations++;
1820 timevar_pop (TV_INTEGRATION);
1822 if (dump_file)
1823 fprintf (dump_file, "Iterations: %i\n", iterations);
1826 cfun->always_inline_functions_inlined = true;
1828 return todo;
struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  NULL,					/* gate */
  cgraph_early_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
/* See if the statement might disappear after inlining.  We are not terribly
   sophisticated, basically looking for simple abstraction penalty wrappers.  */
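/* Editorial sketch (hypothetical user code, not part of this file): the kind
   of abstraction-penalty wrapper this predicate is aimed at.  In

     struct point { int x; int y; };
     static int get_x (const struct point *p) { return p->x; }

   the load from parameter P and the store to the return value are the
   statements classified as "likely eliminated" below, since after inlining
   they usually fold into the caller's own accesses.  */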
static bool
likely_eliminated_by_inlining_p (gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  switch (code)
    {
    case GIMPLE_RETURN:
      return true;
    case GIMPLE_ASSIGN:
      if (gimple_num_ops (stmt) != 2)
	return false;

      /* Casts of parameters, loads from parameters passed by reference
	 and stores to return value or parameters are probably free after
	 inlining.  */
      if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
	  || gimple_assign_rhs_code (stmt) == NOP_EXPR
	  || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
	  || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree lhs = gimple_assign_lhs (stmt);
	  tree inner_rhs = rhs;
	  tree inner_lhs = lhs;
	  bool rhs_free = false;
	  bool lhs_free = false;

	  while (handled_component_p (inner_lhs)
		 || TREE_CODE (inner_lhs) == MEM_REF)
	    inner_lhs = TREE_OPERAND (inner_lhs, 0);
	  while (handled_component_p (inner_rhs)
		 || TREE_CODE (inner_rhs) == ADDR_EXPR
		 || TREE_CODE (inner_rhs) == MEM_REF)
	    inner_rhs = TREE_OPERAND (inner_rhs, 0);

	  if (TREE_CODE (inner_rhs) == PARM_DECL
	      || (TREE_CODE (inner_rhs) == SSA_NAME
		  && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
		  && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
	    rhs_free = true;
	  if (rhs_free && is_gimple_reg (lhs))
	    lhs_free = true;
	  if (((TREE_CODE (inner_lhs) == PARM_DECL
		|| (TREE_CODE (inner_lhs) == SSA_NAME
		    && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
		    && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
	       && inner_lhs != lhs)
	      || TREE_CODE (inner_lhs) == RESULT_DECL
	      || (TREE_CODE (inner_lhs) == SSA_NAME
		  && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
	    lhs_free = true;
	  if (lhs_free
	      && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
	    rhs_free = true;
	  if (lhs_free && rhs_free)
	    return true;
	}
      return false;
    default:
      return false;
    }
}
/* Compute function body size parameters for NODE.  */

static void
estimate_function_body_sizes (struct cgraph_node *node)
{
  gcov_type time = 0;
  gcov_type time_inlining_benefit = 0;
  int size = 0;
  int size_inlining_benefit = 0;
  basic_block bb;
  gimple_stmt_iterator bsi;
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  tree arg;
  int freq;
  tree funtype = TREE_TYPE (node->decl);

  if (dump_file)
    fprintf (dump_file, "Analyzing function body size: %s\n",
	     cgraph_node_name (node));

  gcc_assert (my_function && my_function->cfg);
  FOR_EACH_BB_FN (bb, my_function)
    {
      freq = compute_call_stmt_bb_frequency (node->decl, bb);
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);
	  int this_size = estimate_num_insns (stmt, &eni_size_weights);
	  int this_time = estimate_num_insns (stmt, &eni_time_weights);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  freq:%6i size:%3i time:%3i ",
		       freq, this_size, this_time);
	      print_gimple_stmt (dump_file, stmt, 0, 0);
	    }
	  this_time *= freq;
	  time += this_time;
	  size += this_size;
	  if (likely_eliminated_by_inlining_p (stmt))
	    {
	      size_inlining_benefit += this_size;
	      time_inlining_benefit += this_time;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "    Likely eliminated\n");
	    }
	  gcc_assert (time >= 0);
	  gcc_assert (size >= 0);
	}
    }
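  /* Editorial note: each statement's time was scaled by its basic block
     frequency, which is expressed relative to CGRAPH_FREQ_BASE, so the sums
     are divided back (with rounding) to per-invocation units.  For example,
     if CGRAPH_FREQ_BASE is 1000, a statement of time 2 in a block executed
     about half the time (frequency 500) contributes 2 * 500 / 1000 = 1.  */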
  time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
  time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
			   / CGRAPH_FREQ_BASE);
  if (dump_file)
    fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
	     (int)time, (int)time_inlining_benefit,
	     size, size_inlining_benefit);
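  /* Editorial note: beyond the statements themselves, inlining removes the
     call overhead, so the benefit below also counts the call cost plus the
     cost of moving the return value and each argument.  */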
  time_inlining_benefit += eni_time_weights.call_cost;
  size_inlining_benefit += eni_size_weights.call_cost;
  if (!VOID_TYPE_P (TREE_TYPE (funtype)))
    {
      int cost = estimate_move_cost (TREE_TYPE (funtype));
      time_inlining_benefit += cost;
      size_inlining_benefit += cost;
    }
  for (arg = DECL_ARGUMENTS (node->decl); arg; arg = DECL_CHAIN (arg))
    if (!VOID_TYPE_P (TREE_TYPE (arg)))
      {
	int cost = estimate_move_cost (TREE_TYPE (arg));
	time_inlining_benefit += cost;
	size_inlining_benefit += cost;
      }
  if (time_inlining_benefit > MAX_TIME)
    time_inlining_benefit = MAX_TIME;
  if (time > MAX_TIME)
    time = MAX_TIME;
  inline_summary (node)->self_time = time;
  inline_summary (node)->self_size = size;
  if (dump_file)
    fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
	     (int)time, (int)time_inlining_benefit,
	     size, size_inlining_benefit);
  inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
  inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
}
/* Compute parameters of functions used by the inliner.  */
unsigned int
compute_inline_parameters (struct cgraph_node *node)
{
  HOST_WIDE_INT self_stack_size;

  gcc_assert (!node->global.inlined_to);

  /* Estimate the stack size for the function.  But not at -O0
     because estimated_stack_frame_size is a quadratic problem.  */
  self_stack_size = optimize ? estimated_stack_frame_size (node->decl) : 0;
  inline_summary (node)->estimated_self_stack_size = self_stack_size;
  node->global.estimated_stack_size = self_stack_size;
  node->global.stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  node->local.inlinable = tree_inlinable_function_p (node->decl);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (node->decl);
  estimate_function_body_sizes (node);
  /* Inlining characteristics are maintained by cgraph_mark_inline.  */
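  /* Editorial note: the global size/time fields start out equal to the
     standalone body estimates and are updated as callees are marked inlined
     into this node, so they track the function as it will actually be
     emitted.  */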
  node->global.time = inline_summary (node)->self_time;
  node->global.size = inline_summary (node)->self_size;
  return 0;
}
/* Compute the inline parameters of the current function, identified by
   current_function_decl.  */
static unsigned int
compute_inline_parameters_for_current (void)
{
  compute_inline_parameters (cgraph_node (current_function_decl));
  return 0;
}
struct gimple_opt_pass pass_inline_parameters =
{
 {
  GIMPLE_PASS,
  "inline_param",			/* name */
  NULL,					/* gate */
  compute_inline_parameters_for_current,/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* Perform the intraprocedural analysis of NODE that is required to
   inline indirect calls.  */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  ipa_analyze_node (node);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}
/* Note function body size.  */
static void
analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  current_function_decl = node->decl;

  compute_inline_parameters (node);
  /* FIXME: We should remove the optimize check after we ensure we never run
     IPA passes when not optimizing.  */
  if (flag_indirect_inlining && optimize)
    inline_indirect_intraprocedural_analysis (node);

  current_function_decl = NULL;
  pop_cfun ();
}
/* Called when a new function is inserted into the callgraph late.  */
static void
add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  analyze_function (node);
}
/* Generate the inline summaries by analyzing every function in the
   callgraph.  */
static void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);

  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      ipa_check_create_node_params ();
      ipa_check_create_edge_args ();
    }

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      analyze_function (node);

  return;
}
/* Apply inline plan to function.  */
static unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;
  bool inline_p = false;

  /* FIXME: Currently the pass manager is adding the inline transform more
     than once to some clones.  This needs revisiting after WPA cleanups.  */
  if (cfun->after_inlining)
    return 0;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (node->decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    {
      cgraph_redirect_edge_call_stmt_to_callee (e);
      if (!e->inline_failed || warn_inline)
	inline_p = true;
    }

  if (inline_p)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  return todo | execute_fixup_cfg ();
}
/* Read inline summary.  Jump functions are shared among ipa-cp
   and inliner, so when ipa-cp is active, we don't need to read them
   twice.  */

static void
inline_read_summary (void)
{
  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      if (!flag_ipa_cp)
	ipa_prop_read_jump_functions ();
    }
  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);
}
/* Write inline summary for node in SET.
   Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
   active, we don't need to write them twice.  */

static void
inline_write_summary (cgraph_node_set set,
		      varpool_node_set vset ATTRIBUTE_UNUSED)
{
  if (flag_indirect_inlining && !flag_ipa_cp)
    ipa_prop_write_jump_functions (set);
}
/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.  */

static bool
gate_cgraph_decide_inlining (void)
{
  /* ??? We'd like to skip this if not optimizing or not inlining as
     all always-inline functions have been processed by early
     inlining already.  But this at least breaks EH with C++ as
     we need to unconditionally run fixup_cfg even at -O0.
     So leave it on unconditionally for now.  */
  return true;
}
struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",				/* name */
  gate_cgraph_decide_inlining,		/* gate */
  cgraph_decide_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions | TODO_ggc_collect	/* todo_flags_finish */
 },
 inline_generate_summary,		/* generate_summary */
 inline_write_summary,			/* write_summary */
 inline_read_summary,			/* read_summary */
 NULL,					/* write_optimization_summary */
 NULL,					/* read_optimization_summary */
 NULL,					/* stmt_fixup */
 0,					/* TODOs */
 inline_transform,			/* function_transform */
 NULL,					/* variable_transform */
};

#include "gt-ipa-inline.h"