/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/*  Inlining decision heuristics

    We separate inlining decisions from the inliner itself and store them
    inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
    documentation for the particular representation of inline plans in the
    callgraph.

    There are three major parts of this file:

    cgraph_mark_inline_edge implementation

      This function marks a given call inline and performs the necessary
      modifications of the cgraph (production of the clones and updating of
      overall statistics).

    inlining heuristics limits

      These functions check that a particular inlining is allowed by the
      limits specified by the user (allowed function growth, overall unit
      growth and so on).

    inlining heuristics

      This is the implementation of the IPA pass aiming to get as much
      benefit from inlining as possible while obeying the limits checked
      above.

      The implementation of the particular heuristics is separated from
      the rest of the code to make it easier to replace with a more
      complicated implementation in the future.  The rest of the inlining
      code acts as a library aimed to modify the callgraph and verify that
      the parameters on code size growth fit.

      To mark a given call inline, use the cgraph_mark_inline_edge
      function; verification is performed by cgraph_default_inline_p and
      cgraph_check_inline_limits.

      The heuristics implement a simple knapsack style algorithm ordering
      all functions by their "profitability" (estimated by code size growth)
      and inlining them in priority order.

      cgraph_decide_inlining implements the heuristics taking the whole
      callgraph into account, while cgraph_decide_inlining_incrementally
      considers only one function at a time and is used by the early
      inliner.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes local properties of functions that are used by the
     inliner: the estimated function body size, whether the function is
     inlinable at all, and stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied on the function body.

   pass_early_inlining

     Simple local inlining pass inlining callees into the current function.
     This pass makes no global whole compilation unit analysis, and thus
     when it is allowed to do inlining that expands code size, it might
     result in unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (that is maintained by
     the pass manager).  The functions after inlining are early optimized,
     so the early inliner sees the unoptimized function itself, but all
     considered callees are already optimized, allowing it to unfold
     abstraction penalty on C++ effectively and cheaply.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm to do
     inlining of small functions that results in overall growth of the
     compilation unit, and inlining of functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph) and unlike early inlining it
     does not perform the inlining itself.  */

103 #include "config.h"
104 #include "system.h"
105 #include "coretypes.h"
106 #include "tm.h"
107 #include "tree.h"
108 #include "tree-inline.h"
109 #include "langhooks.h"
110 #include "flags.h"
111 #include "cgraph.h"
112 #include "diagnostic.h"
113 #include "gimple-pretty-print.h"
114 #include "timevar.h"
115 #include "params.h"
116 #include "fibheap.h"
117 #include "intl.h"
118 #include "tree-pass.h"
119 #include "hashtab.h"
120 #include "coverage.h"
121 #include "ggc.h"
122 #include "tree-flow.h"
123 #include "rtl.h"
124 #include "ipa-prop.h"
125 #include "except.h"
127 #define MAX_TIME 1000000000
/* Modes the incremental inliner operates on:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE_NORECURSIVE,
  INLINE_SIZE,
  INLINE_ALL
};

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode);
static void cgraph_flatten (struct cgraph_node *node);

/* Statistics we collect about the inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_size;
static gcov_type max_count, max_benefit;

/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;

static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
  return &node->local.inline_summary;
}

/* Estimate self time of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
				     struct cgraph_node *what)
{
  gcov_type time = (((gcov_type)what->global.time
		     - inline_summary (what)->time_inlining_benefit)
		    * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
		    + to->global.time;
  if (time < 0)
    time = 0;
  if (time > MAX_TIME)
    time = MAX_TIME;
  return time;
}

/* Estimate self size of the function after inlining WHAT into TO.  */

static inline int
cgraph_estimate_size_after_inlining (struct cgraph_node *to,
				     struct cgraph_node *what)
{
  int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
	      + to->global.size);
  gcc_assert (size >= 0);
  return size;
}

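/* For illustration only (hypothetical numbers, not from a real dump):
   with what->global.time == 1000, time_inlining_benefit == 200,
   frequency == CGRAPH_FREQ_BASE / 2 == 500 and to->global.time == 300,
   cgraph_estimate_time_after_inlining yields
   (800 * 500 + 500) / 1000 + 300 == 700.  Similarly, with
   what->global.size == 40, size_inlining_benefit == 10 and
   to->global.size == 100, the estimated size is 40 - 10 + 100 == 130.  */
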
/* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
   by NEST.  */

static void
update_noncloned_frequencies (struct cgraph_node *node,
			      int freq_scale, int nest)
{
  struct cgraph_edge *e;

  /* We do not want to ignore high loop nest after freq drops to 0.  */
  if (!freq_scale)
    freq_scale = 1;
  for (e = node->callees; e; e = e->next_callee)
    {
      e->loop_nest += nest;
      e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
      if (e->frequency > CGRAPH_FREQ_MAX)
	e->frequency = CGRAPH_FREQ_MAX;
      if (!e->inline_failed)
	update_noncloned_frequencies (e->callee, freq_scale, nest);
    }
}

/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using the node originally representing the out-of-line
   function call.  */

void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
			    bool update_original)
{
  HOST_WIDE_INT peak;

  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
	 In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
	  /* Recursive inlining never wants the master clone to
	     be overwritten.  */
	  && update_original
	  /* FIXME: When the address of a DECL_EXTERNAL function is taken,
	     we can still remove its offline copy, but we would need to
	     keep an unanalyzed node in the callgraph so that references
	     can point to it.  */
	  && !e->callee->address_taken
	  && cgraph_can_remove_if_no_direct_calls_p (e->callee)
	  /* Inlining might enable more devirtualizing, so we want to remove
	     those only after all devirtualizable virtual calls are
	     processed.  Lacking "may" edges in the callgraph we just
	     preserve them post inlining.  */
	  && (!DECL_VIRTUAL_P (e->callee->decl)
	      || (!DECL_COMDAT (e->callee->decl)
		  && !DECL_EXTERNAL (e->callee->decl)))
	  /* Don't reuse if more than one function shares a comdat group.
	     If the other function(s) are needed, we need to emit even
	     this function out of line.  */
	  && !e->callee->same_comdat_group
	  && !cgraph_new_nodes)
	{
	  gcc_assert (!e->callee->global.inlined_to);
	  if (e->callee->analyzed && !DECL_EXTERNAL (e->callee->decl))
	    {
	      overall_size -= e->callee->global.size;
	      nfunctions_inlined++;
	    }
	  duplicate = false;
	  e->callee->local.externally_visible = false;
	  update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
	}
      else
	{
	  struct cgraph_node *n;
	  n = cgraph_clone_node (e->callee, e->callee->decl,
				 e->count, e->frequency, e->loop_nest,
				 update_original, NULL);
	  cgraph_redirect_edge_callee (e, n);
	}
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
	 + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;
  cgraph_propagate_frequency (e->callee);

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}

/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  If any new indirect edges are discovered in the
   process, add them to NEW_EDGES, unless it is NULL.  Return true iff any
   new callgraph edges were discovered as a result of inlining.  */

static bool
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
			 VEC (cgraph_edge_p, heap) **new_edges)
{
  int old_size = 0, new_size = 0;
  struct cgraph_node *to = NULL, *what;
  struct cgraph_edge *curr = e;
  int freq;

  /* Don't inline inlined edges.  */
  gcc_assert (e->inline_failed);
  /* Don't even think of inlining an inline clone.  */
  gcc_assert (!e->callee->global.inlined_to);

  e->inline_failed = CIF_OK;
  DECL_POSSIBLY_INLINED (e->callee->decl) = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  freq = e->frequency;
  /* Now update the size of the caller and of all functions the caller is
     inlined into.  */
  for (;e && !e->inline_failed; e = e->caller->callers)
    {
      to = e->caller;
      old_size = e->caller->global.size;
      new_size = cgraph_estimate_size_after_inlining (to, what);
      to->global.size = new_size;
      to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_size > old_size)
    overall_size += new_size - old_size;
  ncalls_inlined++;

  /* FIXME: We should remove the optimize check after we ensure we never run
     IPA passes when not optimizing.  */
  if (flag_indirect_inlining && optimize)
    return ipa_propagate_indirect_call_infos (curr, new_edges);
  else
    return false;
}

/* Estimate the growth caused by inlining NODE into all callees.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  bool self_recursive = false;

  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->caller == node)
	self_recursive = true;
      if (e->inline_failed)
	growth += (cgraph_estimate_size_after_inlining (e->caller, node)
		   - e->caller->global.size);
    }

  /* ??? Wrong for non-trivially self-recursive functions or cases where
     we decide to not inline for different reasons, but it is not a big
     deal as in that case we will keep the body around, but we will also
     avoid some inlining.  */
  if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
      && !DECL_EXTERNAL (node->decl) && !self_recursive)
    growth -= node->global.size;
  /* COMDAT functions are very often not shared across multiple units
     since they come from various template instantiations.
     Take this into account.  */
  else if (DECL_COMDAT (node->decl) && !self_recursive
	   && cgraph_can_remove_if_no_direct_calls_p (node))
    growth -= (node->global.size
	       * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
	       + 50) / 100;

  node->global.estimated_growth = growth;
  return growth;
}

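/* For illustration only (hypothetical numbers): a COMDAT function of size
   100 with two call sites, each growing its caller by 90, accumulates
   growth 180 and then subtracts (100 * (100 - P) + 50) / 100, where P is
   PARAM_COMDAT_SHARING_PROBABILITY.  With P == 20 (the documented default,
   which may differ between releases) the subtracted term is 80, so the
   estimated growth is 100.  */
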
/* Return false when inlining WHAT into TO is not a good idea as it would
   cause too large growth of the function bodies.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
			    cgraph_inline_failed_t *reason)
{
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small function,
     take the inlined function as the base for limiting the growth.  */
  if (inline_summary (to)->self_size > inline_summary (what)->self_size)
    limit = inline_summary (to)->self_size;
  else
    limit = inline_summary (what)->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (to, what);
  if (newsize >= to->global.size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
	*reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += stack_size_limit
		      * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
		   + inline_summary (to)->estimated_self_stack_size
		   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
	*reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

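/* For illustration only (hypothetical numbers): inlining WHAT with
   self_size 30 into TO with self_size 100 starts from limit == 100; with
   PARAM_LARGE_FUNCTION_GROWTH at its documented default of 100
   (release-dependent) the limit becomes 200, so the inlining is rejected
   only once the combined body both exceeds 200 and is above
   PARAM_LARGE_FUNCTION_INSNS.  The stack frame limit is scaled the same
   way by PARAM_STACK_FRAME_GROWTH.  */
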
/* Return true when function N is small enough to be inlined.  */

static bool
cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
{
  tree decl = n->decl;

  if (n->local.disregard_inline_limits)
    return true;

  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
	*reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      return false;
    }
  if (!n->analyzed)
    {
      if (reason)
	*reason = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  if (cgraph_function_body_availability (n) <= AVAIL_OVERWRITABLE)
    {
      if (reason)
	*reason = CIF_OVERWRITABLE;
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
	{
	  if (reason)
	    *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  return false;
	}
    }
  else
    {
      if (n->global.size >= MAX_INLINE_INSNS_AUTO)
	{
	  if (reason)
	    *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	  return false;
	}
    }

  return true;
}

/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining all cases where the same function appears
   more than once in a single recursion nest path in the inline graph.  */

static inline bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
			     struct cgraph_node *what,
			     cgraph_inline_failed_t *reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
	       ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
  return recursive;
}

/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of the nodes affected are recomputed so
   the metrics may accurately depend on values such as the number of
   inlinable callers of the function or the function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth;

  if (edge->callee->local.disregard_inline_limits)
    return INT_MIN;

  growth =
    (cgraph_estimate_size_after_inlining (edge->caller, edge->callee)
     - edge->caller->global.size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
	       cgraph_node_name (edge->caller),
	       cgraph_node_name (edge->callee));
      fprintf (dump_file, "      growth %i, time %i-%i, size %i-%i\n",
	       growth,
	       edge->callee->global.time,
	       inline_summary (edge->callee)->time_inlining_benefit,
	       edge->callee->global.size,
	       inline_summary (edge->callee)->size_inlining_benefit);
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN - growth;
      if (dump)
	fprintf (dump_file, "      %i: Growth %i < 0\n", (int) badness,
		 growth);
    }

  /* When profiling is available, base priorities on -(#calls / growth).
     So we optimize for the overall number of "executed" inlined calls.  */
  else if (max_count)
    {
      badness =
	((int)
	 ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
	 (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i (relative %f): profile info. Relative count %f"
		   " * Relative benefit %f\n",
		   (int) badness, (double) badness / INT_MIN,
		   (double) edge->count / max_count,
		   (double) (inline_summary (edge->callee)->
			     time_inlining_benefit + 1) / (max_benefit + 1));
	}
    }

  /* When a function-local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of
     inlined calls.  This is not too accurate since while the call might
     be frequent within the function, the function itself may be
     infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so less frequently
     called functions of the same size get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
      int benefitperc;
      int growth_for_all;
      badness = growth * 10000;
      benefitperc =
	100 * inline_summary (edge->callee)->time_inlining_benefit
	/ (edge->callee->global.time + 1) + 1;
      benefitperc = MIN (benefitperc, 100);
      div *= benefitperc;

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 10000)
	div = 10000 + ceil_log2 (div) - 8;
      if (div < 1)
	div = 1;
      if (badness > 0)
	badness /= div;
      growth_for_all = cgraph_estimate_growth (edge->callee);
      badness += growth_for_all;
      if (badness > INT_MAX)
	badness = INT_MAX;
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i: guessed profile. frequency %i, overall growth %i,"
		   " benefit %i%%, divisor %i\n",
		   (int) badness, edge->frequency, growth_for_all,
		   benefitperc, div);
	}
    }
  /* When a function-local profile is not available or it does not give
     useful information (i.e. the frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness >>= nest;
      else
	{
	  badness <<= nest;
	}
      if (dump)
	fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
		 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}

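/* A worked example of the guessed-profile branch above (hypothetical
   numbers): growth == 20 and edge->frequency == CGRAPH_FREQ_BASE give
   div == 101; a callee where inlining is estimated to save about half of
   its time gives benefitperc == 51, so div == 5151 and
   badness == 200000 / 5151 == 38, plus the callee's overall estimated
   growth.  Smaller badness is better, so hot, high-benefit, low-growth
   edges are inlined first.  */
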
/* Recompute badness of EDGE and update its key in HEAP if needed.  */
static void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = cgraph_edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key only decreases keys.
	 When we increase the key we do not update the heap
	 and instead re-insert the element once it becomes
	 a minimum of the heap.  */
      if (badness < n->key)
	{
	  fibheap_replace_key (heap, n, badness);
	  gcc_checking_assert (n->key == badness);
	}
    }
  else
    edge->aux = fibheap_insert (heap, badness, edge);
}

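/* The asymmetry above is a deliberate lazy-update scheme: decreases are
   propagated into the fibonacci heap immediately (a cheap operation for
   fibheaps), while increases are detected later, when the edge is popped
   in cgraph_decide_inlining_of_small_functions and its stale key is
   compared against a freshly computed badness and re-inserted if the two
   differ.  */
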
/* Recompute heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;

  if (!node->local.inlinable
      || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;
  node->global.estimated_growth = INT_MIN;

  /* See if there is something to do.  */
  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      break;
  if (!edge)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (; edge; edge = edge->next_caller)
	if (edge->aux)
	  {
	    fibheap_delete_node (heap, (fibnode_t) edge->aux);
	    edge->aux = NULL;
	    if (edge->inline_failed)
	      edge->inline_failed = failed_reason;
	  }
      return;
    }

  for (; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      update_edge_key (heap, edge);
}

/* Recompute heap nodes for each uninlined call.
   This is used when we know that edge badnesses are only going to
   increase (we introduced a new call site) and thus all we need is to
   insert the newly created edges into the heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;
  node->global.estimated_growth = INT_MIN;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (e->inline_failed
	    && e->callee->local.inlinable
	    && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
	    && !bitmap_bit_p (updated_nodes, e->callee->uid))
	  {
	    node->global.estimated_growth = INT_MIN;
	    /* If the function becomes uninlinable, we need to remove it
	       from the heap.  */
	    if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
	      update_caller_keys (heap, e->callee, updated_nodes);
	    else
	      /* Otherwise update just edge E.  */
	      update_edge_key (heap, e);
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}

/* Recompute heap nodes for the caller edges of each callee.
   Walk recursively into all inline clones.  */

static void
update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
			bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;
  node->global.estimated_growth = INT_MIN;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (e->inline_failed)
	  update_caller_keys (heap, e->callee, updated_nodes);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}

/* Enqueue all recursive calls from NODE into the priority queue depending
   on how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
	/* When profile feedback is available, prioritize by the expected
	   number of calls.  Without profile feedback we maintain a simple
	   queue to order candidates by recursive depth.  */
	fibheap_insert (heap,
			!max_count ? priority++
			: -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
			e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

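/* For illustration only (hypothetical numbers): with max_count == 1 << 30
   the divisor (max_count + (1<<24) - 1) / (1<<24) is 64, so an edge with
   count 6400 receives key -100.  Scaling the counts down by 1<<24 keeps
   the negated values within the range of the integer keys used by the
   fibheap while preserving the relative ordering of hot edges.  */
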
/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node,
				  VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  /* It does not make sense to recursively inline always-inline functions
     as we are going to sorry() on the remaining calls anyway.  */
  if (node->local.disregard_inline_limits
      && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
    return false;

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     cgraph_node_name (node));

  /* We need the original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->decl,
				    node->count, CGRAPH_FREQ_BASE, 1,
				    false, NULL);
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
	 && (cgraph_estimate_size_after_inlining (node, master_clone)
	     <= limit))
    {
      struct cgraph_edge *curr
	= (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->decl == cnode->decl)
	  depth++;
      if (depth > max_depth)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "   maximal depth reached\n");
	  continue;
	}

      if (max_count)
	{
	  if (!cgraph_maybe_hot_edge_p (curr))
	    {
	      if (dump_file)
		fprintf (dump_file, "   Not inlining cold call\n");
	      continue;
	    }
	  if (curr->count * 100 / node->count < probability)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "   Probability of edge is too small\n");
	      continue;
	    }
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count)
	    {
	      fprintf (dump_file, " called approx. %.2f times per call",
		       (double)curr->count / node->count);
	    }
	  fprintf (dump_file, "\n");
	}
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
	     "\n   Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
	     master_clone->global.size, node->global.size,
	     master_clone->global.time, node->global.time);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
	cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls of the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}

/* Set inline_failed for all callers of the given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node,
			  cgraph_inline_failed_t reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n",
	     cgraph_inline_failed_string (reason));
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}

/* Given a whole compilation unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}

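/* For illustration only (hypothetical numbers): with a unit of 10000
   estimated instructions and PARAM_INLINE_UNIT_GROWTH at its documented
   default of 30 (release-dependent), the unit may grow to
   10000 * 130 / 100 == 13000 instructions before
   cgraph_decide_inlining_of_small_functions stops accepting
   size-increasing inlinings.  */
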
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */
static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      if (edge->callee->local.inlinable
	  && edge->inline_failed
	  && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
	edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
    }
}

/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and then the
   estimates are updated as inlining progresses.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers)
	continue;
      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
	{
	  cgraph_set_inline_failed (node, failed_reason);
	  continue;
	}

      for (edge = node->callers; edge; edge = edge->next_caller)
	if (edge->inline_failed)
	  {
	    gcc_assert (!edge->aux);
	    edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
	  }
    }

  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  while (overall_size <= max_size
	 && !fibheap_empty (heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (heap);
      int current_badness;
      int growth;
      cgraph_inline_failed_t not_good = CIF_OK;

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;

      /* When updating the edge costs, we only decrease badness in the keys.
	 When the badness increases, we keep the heap as it is and re-insert
	 the element now.  */
      current_badness = cgraph_edge_badness (edge, false);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
	{
	  edge->aux = fibheap_insert (heap, current_badness, edge);
	  continue;
	}

      callee = edge->callee;

      growth = (cgraph_estimate_size_after_inlining (edge->caller, edge->callee)
		- edge->caller->global.size);

      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s with %i size\n",
		   cgraph_node_name (edge->callee),
		   edge->callee->global.size);
	  fprintf (dump_file,
		   " to be inlined into %s in %s:%i\n"
		   " Estimated growth after inlined into all callees is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
		   cgraph_estimate_growth (edge->callee),
		   badness,
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    cgraph_edge_badness (edge, true);
	}

      /* When not having profile info ready, we don't weight in any way the
	 position of the call in the procedure itself.  This means that if a
	 call of function A from function B seems profitable to inline, the
	 recursive call of function A in the inline copy of A in B will look
	 profitable too, and we end up inlining until reaching the maximal
	 function growth.  This is not a good idea, so prohibit the
	 recursive inlining.

	 ??? When the frequencies are taken into account we might not need
	 this restriction.

	 We need to be careful here, in some testcases, e.g. directives.c in
	 libcpp, we can estimate a self-recursive function to have negative
	 growth for inlining completely.  */
      if (!edge->count)
	{
	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->decl == edge->callee->decl)
		break;
	      where = where->callers->caller;
	    }
	  if (where->global.inlined_to)
	    {
	      edge->inline_failed
		= (edge->callee->local.disregard_inline_limits
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      if (dump_file)
		fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
	      continue;
	    }
	}

      if (edge->callee->local.disregard_inline_limits)
	;
      else if (!cgraph_maybe_hot_edge_p (edge))
	not_good = CIF_UNLIKELY_CALL;
      else if (!flag_inline_functions
	       && !DECL_DECLARED_INLINE_P (edge->callee->decl))
	not_good = CIF_NOT_DECLARED_INLINED;
      else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
	not_good = CIF_OPTIMIZING_FOR_SIZE;
      if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
	{
	  if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
					    &edge->inline_failed))
	    {
	      edge->inline_failed = not_good;
	      if (dump_file)
		fprintf (dump_file, " inline_failed:%s.\n",
			 cgraph_inline_failed_string (edge->inline_failed));
	    }
	  continue;
	}
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
	{
	  if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
					    &edge->inline_failed))
	    {
	      if (dump_file)
		fprintf (dump_file, " inline_failed:%s.\n",
			 cgraph_inline_failed_string (edge->inline_failed));
	    }
	  continue;
	}
      if (!tree_can_inline_p (edge)
	  || edge->call_stmt_cannot_inline_p)
	{
	  if (dump_file)
	    fprintf (dump_file, " inline_failed:%s.\n",
		     cgraph_inline_failed_string (edge->inline_failed));
	  continue;
	}
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
				       &edge->inline_failed))
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!cgraph_decide_recursive_inlining (where,
						 flag_indirect_inlining
						 ? &new_indirect_edges : NULL))
	    continue;
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);
	  update_all_callee_keys (heap, where, updated_nodes);
	}
      else
	{
	  struct cgraph_node *callee;
	  if (!cgraph_check_inline_limits (edge->caller, edge->callee,
					   &edge->inline_failed))
	    {
	      if (dump_file)
		fprintf (dump_file, " Not inlining into %s:%s.\n",
			 cgraph_node_name (edge->caller),
			 cgraph_inline_failed_string (edge->inline_failed));
	      continue;
	    }
	  callee = edge->callee;
	  gcc_checking_assert (!callee->global.inlined_to);
	  cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);

	  /* We inlined the last offline copy into the body.  This might
	     lead to callees of the function having fewer call sites, and
	     thus they may need updating.  */
	  if (callee->global.inlined_to)
	    update_all_callee_keys (heap, callee, updated_nodes);
	  else
	    update_callee_keys (heap, edge->callee, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as the number of inlinable calls and the size of the
	 function body.  After inlining these properties might change
	 for the function we inlined into (since its body size changed)
	 and for the functions called by the function we inlined (since
	 the number of their inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);

      /* We removed one call of the function we just inlined.  If the
	 offline copy is still needed, be sure to update the keys.  */
      if (callee != where && !callee->global.inlined_to)
	update_caller_keys (heap, callee, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
	{
	  fprintf (dump_file,
		   " Inlined into %s which now has time %i and size %i,"
		   "net change of %+i.\n",
		   cgraph_node_name (edge->caller),
		   edge->caller->global.time,
		   edge->caller->global.size,
		   overall_size - old_size);
	}
      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }
  while (!fibheap_empty (heap))
    {
      int badness = fibheap_min_key (heap);

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;
#ifdef ENABLE_CHECKING
      gcc_assert (cgraph_edge_badness (edge, false) >= badness);
#endif
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nSkipping %s with %i size\n",
		   cgraph_node_name (edge->callee),
		   edge->callee->global.size);
	  fprintf (dump_file,
		   " called by %s in %s:%i\n"
		   " Estimated growth after inlined into all callees is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
		   cgraph_estimate_growth (edge->callee),
		   badness,
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    cgraph_edge_badness (edge, true);
	}
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
	  && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
					   &edge->inline_failed))
	edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
    }

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}

/* Flatten NODE from the IPA inliner.  */

static void
cgraph_flatten (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *)(size_t) INLINE_ALL;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;

      if (e->call_stmt_cannot_inline_p)
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: %s",
		     cgraph_inline_failed_string (e->inline_failed));
	  continue;
	}

      if (!e->callee->analyzed)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining: Function body not available.\n");
	  continue;
	}

      /* We've hit a cycle?  It is time to give up.  */
      if (e->callee->aux)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining %s into %s to avoid cycle.\n",
		     cgraph_node_name (e->callee),
		     cgraph_node_name (e->caller));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  cgraph_flatten (e->callee);
	  continue;
	}

      if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: recursive call.\n");
	  continue;
	}

      if (!tree_can_inline_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: %s",
		     cgraph_inline_failed_string (e->inline_failed));
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 cgraph_node_name (e->callee),
		 cgraph_node_name (e->caller));
      orig_callee = e->callee;
      cgraph_mark_inline_edge (e, true, NULL);
      if (e->callee != orig_callee)
	orig_callee->aux = (void *)(size_t) INLINE_ALL;
      cgraph_flatten (e->callee);
      if (e->callee != orig_callee)
	orig_callee->aux = NULL;
    }

  node->aux = NULL;
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_size = 0;
  int i;
  int initial_size = 0;

  cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
  if (in_lto_p && flag_indirect_inlining)
    ipa_update_after_lto_read ();
  if (flag_indirect_inlining)
    ipa_create_all_structures_for_iinln ();

  max_count = 0;
  max_benefit = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      {
	struct cgraph_edge *e;

	gcc_assert (inline_summary (node)->self_size == node->global.size);
	if (!DECL_EXTERNAL (node->decl))
	  initial_size += node->global.size;
	for (e = node->callees; e; e = e->next_callee)
	  if (max_count < e->count)
	    max_count = e->count;
	if (max_benefit < inline_summary (node)->time_inlining_benefit)
	  max_benefit = inline_summary (node)->time_inlining_benefit;
      }
  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));
  overall_size = initial_size;

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining.  Starting with size %i.\n",
	     initial_size);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened, but don't update overall unit
	 size.  Calling the incremental inliner here is lame,
	 a simple worklist should be enough.  What should be left
	 here from the early inliner (if it runs) is cyclic cases.
	 Ideally when processing callees we stop inlining at the
	 entry of cycles, possibly cloning that entry point and
	 try to flatten itself turning it into a self-recursive
	 function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", cgraph_node_name (node));
	  cgraph_flatten (node);
	}
    }

  cgraph_decide_inlining_of_small_functions ();

  if (flag_inline_functions_called_once)
    {
      if (dump_file)
	fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
	{
	  node = order[i];

	  if (node->callers
	      && !node->callers->next_caller
	      && !node->global.inlined_to
	      && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
	      && node->local.inlinable
	      && cgraph_function_body_availability (node) >= AVAIL_AVAILABLE
	      && node->callers->inline_failed
	      && node->callers->caller != node
	      && node->callers->caller->global.inlined_to != node
	      && !node->callers->call_stmt_cannot_inline_p
	      && tree_can_inline_p (node->callers)
	      && !DECL_EXTERNAL (node->decl))
	    {
	      cgraph_inline_failed_t reason;
	      old_size = overall_size;
	      if (dump_file)
		{
		  fprintf (dump_file,
			   "\nConsidering %s size %i.\n",
			   cgraph_node_name (node), node->global.size);
		  fprintf (dump_file,
			   " Called once from %s %i insns.\n",
			   cgraph_node_name (node->callers->caller),
			   node->callers->caller->global.size);
		}

	      if (cgraph_check_inline_limits (node->callers->caller, node,
					      &reason))
		{
		  struct cgraph_node *caller = node->callers->caller;
		  cgraph_mark_inline_edge (node->callers, true, NULL);
		  if (dump_file)
		    fprintf (dump_file,
			     " Inlined into %s which now has %i size"
			     " for a net change of %+i size.\n",
			     cgraph_node_name (caller),
			     caller->global.size,
			     overall_size - old_size);
		}
	      else
		{
		  if (dump_file)
		    fprintf (dump_file,
			     " Not inlining: %s.\n",
			     cgraph_inline_failed_string (reason));
		}
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
	     "\nInlined %i calls, eliminated %i functions, "
	     "size %i turned to %i size.\n\n",
	     ncalls_inlined, nfunctions_inlined, initial_size,
	     overall_size);
  free (order);
  return 0;
}

/* Return true when N is a leaf function.  Accept cheap builtins
   in leaf functions.  */

static bool
leaf_node_p (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      return false;
  return true;
}

/* Decide on inlining of calls in NODE, operating in the given MODE.
   This is the incremental decision procedure used by the early inliner.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
				      enum inlining_mode mode)
{
  struct cgraph_edge *e;
  bool inlined = false;
  cgraph_inline_failed_t failed_reason;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
	fprintf (dump_file, "Incrementally flattening %s\n",
		 cgraph_node_name (node));
      mode = INLINE_ALL;
    }

  /* First of all look for always-inline functions.  */
  if (mode != INLINE_SIZE_NORECURSIVE)
    for (e = node->callees; e; e = e->next_callee)
      {
	if (!e->callee->local.disregard_inline_limits
	    && (mode != INLINE_ALL || !e->callee->local.inlinable))
	  continue;
	if (dump_file)
	  fprintf (dump_file,
		   "Considering to always inline inline candidate %s.\n",
		   cgraph_node_name (e->callee));
	if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
	  {
	    if (dump_file)
	      fprintf (dump_file, "Not inlining: recursive call.\n");
	    continue;
	  }
	if (!tree_can_inline_p (e)
	    || e->call_stmt_cannot_inline_p)
	  {
	    if (dump_file)
	      fprintf (dump_file,
		       "Not inlining: %s",
		       cgraph_inline_failed_string (e->inline_failed));
	    continue;
	  }
	if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	    != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
	  {
	    if (dump_file)
	      fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	    continue;
	  }
	if (!e->callee->analyzed)
	  {
	    if (dump_file)
	      fprintf (dump_file,
		       "Not inlining: Function body no longer available.\n");
	    continue;
	  }

	if (dump_file)
	  fprintf (dump_file, " Inlining %s into %s.\n",
		   cgraph_node_name (e->callee),
		   cgraph_node_name (e->caller));
	cgraph_mark_inline_edge (e, true, NULL);
	inlined = true;
      }

  /* Now do the automatic inlining.  */
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  */
      && !node->local.disregard_inline_limits)
    {
      for (e = node->callees; e; e = e->next_callee)
	{
	  int allowed_growth = 0;

	  if (!e->callee->local.inlinable
	      || !e->inline_failed
	      || e->callee->local.disregard_inline_limits)
	    continue;
	  if (dump_file)
	    fprintf (dump_file, "Considering inline candidate %s.\n",
		     cgraph_node_name (e->callee));
	  if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
	    {
	      if (dump_file)
		fprintf (dump_file, "Not inlining: recursive call.\n");
	      continue;
	    }
	  if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	      != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Not inlining: SSA form does not match.\n");
	      continue;
	    }

	  if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
	      && optimize_function_for_speed_p (cfun))
	    allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);

	  /* When the function body would grow and inlining the function
	     won't eliminate the need for an offline copy of the function,
	     don't inline.  */
	  if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
	       || (!flag_inline_functions
		   && !DECL_DECLARED_INLINE_P (e->callee->decl)))
	      && (cgraph_estimate_size_after_inlining (e->caller, e->callee)
		  > e->caller->global.size + allowed_growth)
	      && cgraph_estimate_growth (e->callee) > allowed_growth)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Not inlining: code size would grow by %i.\n",
			 cgraph_estimate_size_after_inlining (e->caller,
							      e->callee)
			 - e->caller->global.size);
	      continue;
	    }
	  if (e->call_stmt_cannot_inline_p
	      || !tree_can_inline_p (e))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Not inlining: call site not inlinable.\n");
	      continue;
	    }
	  if (!e->callee->analyzed)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Not inlining: Function body no longer available.\n");
	      continue;
	    }
	  if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed))
	    {
	      if (dump_file)
		fprintf (dump_file, "Not inlining: %s.\n",
			 cgraph_inline_failed_string (e->inline_failed));
	      continue;
	    }
	  if (cgraph_default_inline_p (e->callee, &failed_reason))
	    {
	      if (dump_file)
		fprintf (dump_file, " Inlining %s into %s.\n",
			 cgraph_node_name (e->callee),
			 cgraph_node_name (e->caller));
	      cgraph_mark_inline_edge (e, true, NULL);
	      inlined = true;
	    }
	}
    }
  return inlined;
}

/* Because inlining might remove no-longer reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and
   other passes to be somewhat more effective and avoids some code
   duplication in the later real inlining pass for testcases with very
   many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;
  int iterations = 0;

  if (seen_error ())
    return 0;

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining)
    {
      /* When not optimizing or not inlining, inline only always-inline
	 functions.  */
      cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  else
    {
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", cgraph_node_name (node));
	  cgraph_flatten (node);
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);
	  timevar_pop (TV_INTEGRATION);
	}
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && cgraph_decide_inlining_incrementally (node,
						      iterations
						      ? INLINE_SIZE_NORECURSIVE
						      : INLINE_SIZE))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);
	  iterations++;
	  timevar_pop (TV_INTEGRATION);
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}

struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  NULL,					/* gate */
  cgraph_early_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};

/* See if a statement might disappear after inlining.
   0 - the statement is not eliminated
   1 - half of such statements go away
   2 - the statement is eliminated for sure.
   We are not terribly sophisticated, basically looking for simple
   abstraction penalty wrappers; see the example after this function.  */
static int
eliminated_by_inlining_prob (gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  switch (code)
    {
    case GIMPLE_RETURN:
      return 2;
    case GIMPLE_ASSIGN:
      if (gimple_num_ops (stmt) != 2)
        return 0;

      /* Casts of parameters, loads from parameters passed by reference
         and stores to return value or parameters are often free after
         inlining due to SRA and further combining.
         Assume that half of such statements go away.  */
      if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
          || gimple_assign_rhs_code (stmt) == NOP_EXPR
          || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
          || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
        {
          tree rhs = gimple_assign_rhs1 (stmt);
          tree lhs = gimple_assign_lhs (stmt);
          tree inner_rhs = rhs;
          tree inner_lhs = lhs;
          bool rhs_free = false;
          bool lhs_free = false;

          /* Strip component and memory references to get at the base
             object on each side.  */
          while (handled_component_p (inner_lhs)
                 || TREE_CODE (inner_lhs) == MEM_REF)
            inner_lhs = TREE_OPERAND (inner_lhs, 0);
          while (handled_component_p (inner_rhs)
                 || TREE_CODE (inner_rhs) == ADDR_EXPR
                 || TREE_CODE (inner_rhs) == MEM_REF)
            inner_rhs = TREE_OPERAND (inner_rhs, 0);

          /* Reads of parameters, directly or via their default-definition
             SSA names, are likely eliminated.  */
          if (TREE_CODE (inner_rhs) == PARM_DECL
              || (TREE_CODE (inner_rhs) == SSA_NAME
                  && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
                  && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
            rhs_free = true;
          if (rhs_free && is_gimple_reg (lhs))
            lhs_free = true;
          /* Likewise for stores to parameters and to the return value.  */
          if (((TREE_CODE (inner_lhs) == PARM_DECL
                || (TREE_CODE (inner_lhs) == SSA_NAME
                    && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
                    && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
               && inner_lhs != lhs)
              || TREE_CODE (inner_lhs) == RESULT_DECL
              || (TREE_CODE (inner_lhs) == SSA_NAME
                  && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
            lhs_free = true;
          if (lhs_free
              && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
            rhs_free = true;
          if (lhs_free && rhs_free)
            return 1;
        }
      return 0;
    default:
      return 0;
    }
}
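/* Example of the abstraction-penalty pattern the function above looks for
   (hypothetical source, for illustration only):

     struct vec { int *data; };
     static int first (struct vec *v) { return v->data[0]; }

   In FIRST, the load through parameter V scores 1 (likely removed by SRA
   and further combining once inlined) and the GIMPLE_RETURN scores 2
   (always removed), so nearly the whole body is expected to vanish after
   inlining.  */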
/* Compute function body size parameters for NODE.  */
static void
estimate_function_body_sizes (struct cgraph_node *node)
{
  gcov_type time = 0;
  gcov_type time_inlining_benefit = 0;
  /* Estimate static overhead for function prologue/epilogue and alignment.  */
  int size = 2;
  /* Benefits are scaled by the probability of elimination, which is in the
     range 0..2 (see eliminated_by_inlining_prob).  */
  int size_inlining_benefit = 2 * 2;
  basic_block bb;
  gimple_stmt_iterator bsi;
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  tree arg;
  int freq;
  tree funtype = TREE_TYPE (node->decl);

  if (dump_file)
    fprintf (dump_file, "Analyzing function body size: %s\n",
             cgraph_node_name (node));

  gcc_assert (my_function && my_function->cfg);
  FOR_EACH_BB_FN (bb, my_function)
    {
      freq = compute_call_stmt_bb_frequency (node->decl, bb);
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple stmt = gsi_stmt (bsi);
          int this_size = estimate_num_insns (stmt, &eni_size_weights);
          int this_time = estimate_num_insns (stmt, &eni_time_weights);
          int prob;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  freq:%6i size:%3i time:%3i ",
                       freq, this_size, this_time);
              print_gimple_stmt (dump_file, stmt, 0, 0);
            }
          this_time *= freq;
          time += this_time;
          size += this_size;
          prob = eliminated_by_inlining_prob (stmt);
          if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "    50%% will be eliminated by inlining\n");
          if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "    will be eliminated by inlining\n");
          size_inlining_benefit += this_size * prob;
          time_inlining_benefit += this_time * prob;
          gcc_assert (time >= 0);
          gcc_assert (size >= 0);
        }
    }
  /* TIME was accumulated scaled by basic block frequency; scale it back.
     The benefits are additionally halved to map the 0..2 elimination
     probability onto 0..1; see the worked example after this function.  */
  time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
  time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE)
                           / (CGRAPH_FREQ_BASE * 2));
  size_inlining_benefit = (size_inlining_benefit + 1) / 2;
  if (dump_file)
    fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
             (int) time, (int) time_inlining_benefit,
             size, size_inlining_benefit);
  /* Account for the call overhead and the argument/return value moves that
     also disappear when the body is inlined.  */
  time_inlining_benefit += eni_time_weights.call_cost;
  size_inlining_benefit += eni_size_weights.call_cost;
  if (!VOID_TYPE_P (TREE_TYPE (funtype)))
    {
      int cost = estimate_move_cost (TREE_TYPE (funtype));
      time_inlining_benefit += cost;
      size_inlining_benefit += cost;
    }
  for (arg = DECL_ARGUMENTS (node->decl); arg; arg = DECL_CHAIN (arg))
    if (!VOID_TYPE_P (TREE_TYPE (arg)))
      {
        int cost = estimate_move_cost (TREE_TYPE (arg));
        time_inlining_benefit += cost;
        size_inlining_benefit += cost;
      }
  if (time_inlining_benefit > MAX_TIME)
    time_inlining_benefit = MAX_TIME;
  if (time > MAX_TIME)
    time = MAX_TIME;
  inline_summary (node)->self_time = time;
  inline_summary (node)->self_size = size;
  if (dump_file)
    fprintf (dump_file,
             "With function call overhead time: %i-%i size: %i-%i\n",
             (int) time, (int) time_inlining_benefit,
             size, size_inlining_benefit);
  inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
  inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
}
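/* Worked example of the scaling above (numbers invented for illustration,
   assuming CGRAPH_FREQ_BASE is 1000): a statement with time estimate 2 in
   a block of frequency 1000 contributes 2000 to TIME, and dividing by
   CGRAPH_FREQ_BASE with rounding yields 2 again.  If the same statement
   has size 3 and elimination probability 1 (i.e. 50%), it adds 3 to
   SIZE_INLINING_BENEFIT, and the final (x + 1) / 2 maps the doubled
   probability scale back, crediting roughly half of the statement's size
   (1.5 units before rounding).  */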
/* Compute parameters of functions used by inliner.  */
void
compute_inline_parameters (struct cgraph_node *node)
{
  HOST_WIDE_INT self_stack_size;

  gcc_assert (!node->global.inlined_to);

  /* Estimate the stack size for the function if we're optimizing.  */
  self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
  inline_summary (node)->estimated_self_stack_size = self_stack_size;
  node->global.estimated_stack_size = self_stack_size;
  node->global.stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  node->local.inlinable = tree_inlinable_function_p (node->decl);
  if (!node->local.inlinable)
    node->local.disregard_inline_limits = 0;

  /* Inlinable functions can always change their signature.  */
  if (node->local.inlinable)
    node->local.can_change_signature = true;
  else
    {
      struct cgraph_edge *e;

      /* Functions calling __builtin_apply_args cannot change their
         signature; see the example after this function.  */
      for (e = node->callees; e; e = e->next_callee)
        if (DECL_BUILT_IN (e->callee->decl)
            && DECL_BUILT_IN_CLASS (e->callee->decl) == BUILT_IN_NORMAL
            && DECL_FUNCTION_CODE (e->callee->decl) == BUILT_IN_APPLY_ARGS)
          break;
      node->local.can_change_signature = !e;
    }
  estimate_function_body_sizes (node);
  /* Inlining characteristics are maintained by cgraph_mark_inline.  */
  node->global.time = inline_summary (node)->self_time;
  node->global.size = inline_summary (node)->self_size;
}
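/* Example of a function whose signature must stay fixed (hypothetical
   source, for illustration only):

     extern void handler ();
     void dispatch (int a, int b)
     {
       __builtin_apply (handler, __builtin_apply_args (), 64);
     }

   __builtin_apply_args captures the incoming argument registers and stack
   of DISPATCH, so removing or reordering its parameters would corrupt the
   forwarded argument block; the loop above therefore clears
   can_change_signature for such callers.  */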
/* Compute parameters of functions used by inliner using
   current_function_decl.  */
static unsigned int
compute_inline_parameters_for_current (void)
{
  compute_inline_parameters (cgraph_node (current_function_decl));
  return 0;
}

struct gimple_opt_pass pass_inline_parameters =
{
 {
  GIMPLE_PASS,
  "inline_param",                       /* name */
  NULL,                                 /* gate */
  compute_inline_parameters_for_current,/* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0                                     /* todo_flags_finish */
 }
};
/* This function performs intraprocedural analysis on NODE that is required
   to inline indirect calls.  */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  ipa_analyze_node (node);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}
/* Note function body size.  */
static void
analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  current_function_decl = node->decl;

  compute_inline_parameters (node);
  /* FIXME: We should remove the optimize check once we ensure we never run
     IPA passes when not optimizing.  */
  if (flag_indirect_inlining && optimize)
    inline_indirect_intraprocedural_analysis (node);

  current_function_decl = NULL;
  pop_cfun ();
}

/* Called when a new function is inserted into the callgraph late.  */
static void
add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  analyze_function (node);
}
/* Compute inline parameters for all analyzed functions and register a hook
   to handle functions inserted into the callgraph later.  */
static void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);

  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      ipa_check_create_node_params ();
      ipa_check_create_edge_args ();
    }

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      analyze_function (node);
}
/* Apply the inline plan to the function; see the walk-through after this
   function.  */
static unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;
  bool inline_p = false;

  /* FIXME: Currently the pass manager adds the inline transform more than
     once to some clones.  This needs revisiting after WPA cleanups.  */
  if (cfun->after_inlining)
    return 0;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (node->decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    {
      cgraph_redirect_edge_call_stmt_to_callee (e);
      if (!e->inline_failed || warn_inline)
        inline_p = true;
    }

  if (inline_p)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  return todo | execute_fixup_cfg ();
}
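/* Walk-through of applying the inline plan (hypothetical source, for
   illustration only): given

     static int add1 (int x) { return x + 1; }
     int f (int y) { return add1 (y); }

   if the IPA pass decided to inline the f -> add1 edge, e->inline_failed
   is NULL on that edge, INLINE_P is set, and optimize_inline_calls
   rewrites F's body to "return y + 1;"; execute_fixup_cfg then cleans up
   the CFG after the replacement.  */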
/* Read inline summary.  Jump functions are shared among ipa-cp
   and inliner, so when ipa-cp is active, we don't need to read them
   twice.  */
static void
inline_read_summary (void)
{
  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      if (!flag_ipa_cp)
        ipa_prop_read_jump_functions ();
    }
  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);
}
/* Write inline summary for nodes in SET.
   Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
   active, we don't need to write them twice.  */
static void
inline_write_summary (cgraph_node_set set,
                      varpool_node_set vset ATTRIBUTE_UNUSED)
{
  if (flag_indirect_inlining && !flag_ipa_cp)
    ipa_prop_write_jump_functions (set);
}
/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.  */
static bool
gate_cgraph_decide_inlining (void)
{
  /* ???  We'd like to skip this if not optimizing or not inlining, as
     all always-inline functions have already been processed by early
     inlining.  But that would at least break EH with C++, since we need
     to run fixup_cfg unconditionally even at -O0.  So leave the pass
     enabled unconditionally for now.  */
  return true;
}
struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",                             /* name */
  gate_cgraph_decide_inlining,          /* gate */
  cgraph_decide_inlining,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_remove_functions,                /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
 },
 inline_generate_summary,               /* generate_summary */
 inline_write_summary,                  /* write_summary */
 inline_read_summary,                   /* read_summary */
 NULL,                                  /* write_optimization_summary */
 NULL,                                  /* read_optimization_summary */
 NULL,                                  /* stmt_fixup */
 0,                                     /* TODOs */
 inline_transform,                      /* function_transform */
 NULL                                   /* variable_transform */
};

#include "gt-ipa-inline.h"