/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation about the particular representation of inline plans in
   the callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function allows us to mark a given call inline and performs the
     necessary modifications of the cgraph (production of the clones and
     updating of overall statistics).

   inlining heuristics limits

     These functions allow us to check that a particular inlining is
     allowed by the limits specified by the user (allowed function growth,
     overall unit growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass aiming to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of the particular heuristics is separated from
     the rest of the code to make it easier to replace it with a more
     complicated implementation in the future.  The rest of the inlining
     code acts as a library aimed to modify the callgraph and verify that
     the parameters on code size growth fit.

     To mark a given call inline, use the cgraph_mark_inline function; the
     verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm ordering
     all functions by their "profitability" (estimated by code size growth)
     and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used by the early
     inliner.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes local properties of functions that are used by the
     inliner: estimated function body size, whether the function is
     inlinable at all, and stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied on the function body.

   pass_early_inlining

     Simple local inlining pass inlining callees into the current function.
     This pass makes no global whole-compilation-unit analysis and thus,
     when allowed to do inlining that expands code size, it might result in
     unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (that is maintained by
     the pass manager).  The functions after inlining are early optimized,
     so the early inliner sees the unoptimized function itself, but all
     considered callees are already optimized, allowing it to unfold the
     abstraction penalty on C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, the early inlining is also necessary to reduce
     instrumentation costs on programs with a high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought
     in via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures first
     inlining.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm to do
     inlining of small functions that results in overall growth of the
     compilation unit, and inlining of functions called once.  The pass
     computes just the so-called inline plan (representation of the
     inlining to be done in the callgraph) and, unlike early inlining, it
     is not performing the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline
     on a given function.  Possibly the function body before inlining is
     saved when it is needed for further inlining later.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
#include "ipa-prop.h"
#include "except.h"

#define MAX_TIME 1000000000
/* Modes the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE_NORECURSIVE,
  INLINE_SIZE,
  INLINE_ALL
};
static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *,
				      enum inlining_mode);
static void cgraph_flatten (struct cgraph_node *node);


/* Statistics we collect about inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_size;
static gcov_type max_count, max_benefit;

/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;

static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
  return &node->local.inline_summary;
}
/* Estimate self time of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
				     struct cgraph_node *what)
{
  gcov_type time = (((gcov_type)what->global.time
		     - inline_summary (what)->time_inlining_benefit)
		    * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
		    + to->global.time;
  if (time < 0)
    time = 0;
  if (time > MAX_TIME)
    time = MAX_TIME;
  return time;
}
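
/* A worked illustration of the formula above (assuming the usual
   CGRAPH_FREQ_BASE of 1000): a callee whose residual time after the
   inlining benefit is 40, called from a site with frequency 500,
   contributes 40 * 500 / 1000 == 20 time units (rounded to nearest)
   on top of TO's own time; the result is clamped to [0, MAX_TIME].  */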
/* Estimate self size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
				     struct cgraph_node *what)
{
  int size = ((what->global.size
	       - inline_summary (what)->size_inlining_benefit)
	      * times + to->global.size);
  gcc_assert (size >= 0);
  return size;
}
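
/* A worked illustration: inlining a callee of size 30 with a size
   inlining benefit of 10 into a caller of size 50 at two call sites
   gives (30 - 10) * 2 + 50 == 90 size units.  */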
/* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
   by NEST.  */

static void
update_noncloned_frequencies (struct cgraph_node *node,
			      int freq_scale, int nest)
{
  struct cgraph_edge *e;

  /* We do not want to ignore high loop nest after freq drops to 0.  */
  if (!freq_scale)
    freq_scale = 1;
  for (e = node->callees; e; e = e->next_callee)
    {
      e->loop_nest += nest;
      e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
      if (e->frequency > CGRAPH_FREQ_MAX)
	e->frequency = CGRAPH_FREQ_MAX;
      if (!e->inline_failed)
	update_noncloned_frequencies (e->callee, freq_scale, nest);
    }
}
/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using a node originally representing an out-of-line
   function call.  */

void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
			    bool update_original)
{
  HOST_WIDE_INT peak;

  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
	 In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
	  && cgraph_can_remove_if_no_direct_calls_p (e->callee)
	  /* Don't reuse if more than one function shares a comdat group.
	     If the other function(s) are needed, we need to emit even
	     this function out of line.  */
	  && !e->callee->same_comdat_group
	  && !cgraph_new_nodes)
	{
	  gcc_assert (!e->callee->global.inlined_to);
	  if (e->callee->analyzed)
	    {
	      overall_size -= e->callee->global.size;
	      nfunctions_inlined++;
	    }
	  duplicate = false;
	  e->callee->local.externally_visible = false;
	  update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
	}
      else
	{
	  struct cgraph_node *n;
	  n = cgraph_clone_node (e->callee, e->count, e->frequency,
				 e->loop_nest, update_original, NULL);
	  cgraph_redirect_edge_callee (e, n);
	}
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
	 + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}
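
/* An illustration of the stack bookkeeping above: if the root of the
   inline sequence A has a 64-byte frame and B is inlined into it, B's
   stack_frame_offset becomes 64; inlining C into B then places C at
   offset 64 plus the size of B's own frame, and the root's
   estimated_stack_size is raised to the peak, i.e. C's offset plus C's
   own frame size.  */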
/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  If any new indirect edges are discovered in the
   process, add them to NEW_EDGES, unless it is NULL.  Return true iff any
   new callgraph edges were discovered as a result of inlining.  */

static bool
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
			 VEC (cgraph_edge_p, heap) **new_edges)
{
  int old_size = 0, new_size = 0;
  struct cgraph_node *to = NULL, *what;
  struct cgraph_edge *curr = e;
  int freq;

  gcc_assert (e->inline_failed);
  e->inline_failed = CIF_OK;

  if (!e->callee->global.inlined)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  freq = e->frequency;
  /* Now update size of caller and all functions caller is inlined into.  */
  for (;e && !e->inline_failed; e = e->caller->callers)
    {
      to = e->caller;
      old_size = e->caller->global.size;
      new_size = cgraph_estimate_size_after_inlining (1, to, what);
      to->global.size = new_size;
      to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_size > old_size)
    overall_size += new_size - old_size;
  ncalls_inlined++;

  if (flag_indirect_inlining)
    return ipa_propagate_indirect_call_infos (curr, new_edges);
  else
    return false;
}
/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.  */

static void
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!edge->call_stmt_cannot_inline_p);
  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
	{
	  cgraph_mark_inline_edge (e, true, NULL);
	  if (e == edge)
	    edge = next;
	}
    }
}
/* Estimate the growth caused by inlining NODE into all callees.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  bool self_recursive = false;

  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->caller == node)
	self_recursive = true;
      if (e->inline_failed)
	growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
		   - e->caller->global.size);
    }

  /* ??? Wrong for non-trivially self-recursive functions or cases where
     we decide to not inline for different reasons, but it is not a big
     deal, as in that case we will keep the body around, but we will also
     avoid some inlining.  */
  if (cgraph_only_called_directly_p (node)
      && !DECL_EXTERNAL (node->decl) && !self_recursive)
    growth -= node->global.size;

  node->global.estimated_growth = growth;
  return growth;
}
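
/* A worked illustration: a function of size 20 with a size inlining
   benefit of 5 and three inlinable call sites contributes
   (20 - 5) * 3 == 45 units of growth; if all direct calls would then be
   gone and the offline body can be removed, its own size of 20 is
   subtracted, giving an estimated growth of 25.  */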
/* Return false when inlining WHAT into TO is not a good idea, as it
   would cause too large growth of function bodies.  When ONE_ONLY is
   true, assume that only one call site is going to be inlined; otherwise
   figure out how many call sites in TO call WHAT and verify that all can
   be inlined.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
			    cgraph_inline_failed_t *reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
	times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small function,
     take the inlined function as base for limiting the growth.  */
  if (inline_summary (to)->self_size > inline_summary (what)->self_size)
    limit = inline_summary (to)->self_size;
  else
    limit = inline_summary (what)->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
	*reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += (stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (to->global.stack_frame_offset
		   + inline_summary (to)->estimated_self_stack_size
		   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
	*reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
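
/* An illustration, assuming --param large-function-growth keeps its
   usual default of 100 (the exact defaults are a tuning matter): the
   limit above is then the larger self size doubled, so a caller of self
   size 400 may grow to 800 before CIF_LARGE_FUNCTION_GROWTH_LIMIT
   triggers, and even then only once the result also exceeds
   --param large-function-insns.  */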
/* Return true when function N is small enough to be inlined.  */

static bool
cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
{
  tree decl = n->decl;

  if (n->local.disregard_inline_limits)
    return true;

  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
	*reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      return false;
    }

  if (!n->analyzed)
    {
      if (reason)
	*reason = CIF_BODY_NOT_AVAILABLE;
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
	{
	  if (reason)
	    *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  return false;
	}
    }
  else
    {
      if (n->global.size >= MAX_INLINE_INSNS_AUTO)
	{
	  if (reason)
	    *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	  return false;
	}
    }

  return true;
}
/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining all cases where the same function appears
   more than once in the single recursion nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
			     struct cgraph_node *what,
			     cgraph_inline_failed_t *reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
	       ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
  return recursive;
}
/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable
   callers of the function or function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth =
    (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
     - edge->caller->global.size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
	       cgraph_node_name (edge->caller),
	       cgraph_node_name (edge->callee));
      fprintf (dump_file, "      growth %i, time %i-%i, size %i-%i\n",
	       growth,
	       edge->callee->global.time,
	       inline_summary (edge->callee)->time_inlining_benefit,
	       edge->callee->global.size,
	       inline_summary (edge->callee)->size_inlining_benefit);
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN - growth;
      if (dump)
	fprintf (dump_file, "      %i: Growth %i < 0\n", (int) badness,
		 growth);
    }

  /* When profiling is available, base priorities on -(#calls / growth).
     So we optimize for the overall number of "executed" inlined calls.  */
  else if (max_count)
    {
      badness =
	((int)
	 ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
	 (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i (relative %f): profile info. Relative count %f"
		   " * Relative benefit %f\n",
		   (int) badness, (double) badness / INT_MIN,
		   (double) edge->count / max_count,
		   (double) (inline_summary (edge->callee)->
			     time_inlining_benefit + 1) / (max_benefit + 1));
	}
    }

  /* When the function-local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of inlined
     calls.  This is not too accurate since while the call might be frequent
     within the function, the function itself is infrequent.

     The other objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so fewer-times-called
     functions of the same size get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
      int benefitperc;
      int growth_for_all;
      badness = growth * 10000;
      benefitperc =
	MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
	     (edge->callee->global.time + 1) + 1, 100);
      div *= benefitperc;

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 10000)
	div = 10000 + ceil_log2 (div) - 8;
      if (div < 1)
	div = 1;
      if (badness > 0)
	badness /= div;
      growth_for_all = cgraph_estimate_growth (edge->callee);
      badness += growth_for_all;
      if (badness > INT_MAX)
	badness = INT_MAX;
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i: guessed profile. frequency %i, overall growth %i,"
		   " benefit %i%%, divisor %i\n",
		   (int) badness, edge->frequency, growth_for_all,
		   benefitperc, div);
	}
    }
  /* When the function-local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness >>= nest;
      else
	badness <<= nest;
      if (dump)
	fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
		 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}
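
/* A worked example for the guessed-profile branch above: a call with
   frequency equal to CGRAPH_FREQ_BASE and a callee whose time inlining
   benefit is 50% of its time gives div == 101 * 51 == 5151; a growth of
   12 then yields a badness of roughly 12 * 10000 / 5151 == 23 before the
   overall-growth bias is added, so hotter and higher-benefit edges sort
   toward the front of the fibheap.  */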
/* Recompute heap nodes for each of NODE's caller edges.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
	if (edge->aux)
	  {
	    fibheap_delete_node (heap, (fibnode_t) edge->aux);
	    edge->aux = NULL;
	    if (edge->inline_failed)
	      edge->inline_failed = failed_reason;
	  }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	int badness = cgraph_edge_badness (edge, false);
	if (edge->aux)
	  {
	    fibnode_t n = (fibnode_t) edge->aux;
	    gcc_assert (n->data == edge);
	    if (n->key == badness)
	      continue;

	    /* fibheap_replace_key can only decrease the key; on an
	       increase we delete the node and re-insert it below.  */
	    if (badness < n->key)
	      {
		fibheap_replace_key (heap, n, badness);
		gcc_assert (n->key == badness);
		continue;
	      }
	    fibheap_delete_node (heap, (fibnode_t) edge->aux);
	  }
	edge->aux = fibheap_insert (heap, badness, edge);
      }
}
/* Recompute heap nodes for each of the caller edges of each of the
   callees.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else if (!e->inline_failed)
      update_callee_keys (heap, e->callee, updated_nodes);
}
/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
	/* When profile feedback is available, prioritize by expected number
	   of calls.  Without profile feedback we maintain a simple queue
	   to order candidates via recursive depths.  */
	fibheap_insert (heap,
			!max_count ? priority++
			: -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
			e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until the body size reaches the given argument.  If any
   new indirect edges are discovered in the process, add them to
   *NEW_EDGES, unless NEW_EDGES is NULL.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node,
				  VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  /* It does not make sense to recursively inline always-inline functions
     as we are going to sorry() on the remaining calls anyway.  */
  if (node->local.disregard_inline_limits
      && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
    return false;

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that function is small enough to be considered for inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     cgraph_node_name (node));

  /* We need the original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1,
				    false, NULL);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
	 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
	     <= limit))
    {
      struct cgraph_edge *curr
	= (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->decl == curr->callee->decl)
	  depth++;
      if (depth > max_depth)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "   maximal depth reached\n");
	  continue;
	}

      if (max_count)
	{
	  if (!cgraph_maybe_hot_edge_p (curr))
	    {
	      if (dump_file)
		fprintf (dump_file, "   Not inlining cold call\n");
	      continue;
	    }
	  if (curr->count * 100 / node->count < probability)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "   Probability of edge is too small\n");
	      continue;
	    }
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count)
	    fprintf (dump_file, " called approx. %.2f times per call",
		     (double)curr->count / node->count);
	  fprintf (dump_file, "\n");
	}
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
	     "\n   Inlined %i times, body grown from size %i to %i, "
	     "time %i to %i\n", n,
	     master_clone->global.size, node->global.size,
	     master_clone->global.time, node->global.time);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone itself, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
	cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls of the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}
/* Set inline_failed for all callers of given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node,
			  cgraph_inline_failed_t reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n",
	     cgraph_inline_failed_string (reason));
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}
/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
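
/* An illustration, assuming --param inline-unit-growth keeps its usual
   default of 30 (the defaults are a tuning matter): a unit estimated at
   10000 insns may grow to 13000.  Units smaller than
   --param large-unit-insns are rounded up to that value first, so tiny
   units get proportionally more headroom.  */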
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */
static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false),
				  edge);
    }
}
/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and then we
   update the estimates.

   INLINED and INLINED_CALLEES are just pointers to arrays large enough
   to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers)
	continue;
      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
	{
	  cgraph_set_inline_failed (node, failed_reason);
	  continue;
	}

      for (edge = node->callers; edge; edge = edge->next_caller)
	if (edge->inline_failed)
	  {
	    gcc_assert (!edge->aux);
	    edge->aux = fibheap_insert (heap,
					cgraph_edge_badness (edge, false),
					edge);
	  }
    }

  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  while (overall_size <= max_size
	 && !fibheap_empty (heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (heap);
      int growth;
      cgraph_inline_failed_t not_good = CIF_OK;

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;
#ifdef ENABLE_CHECKING
      gcc_assert (cgraph_edge_badness (edge, false) == badness);
#endif
      callee = edge->callee;

      growth = (cgraph_estimate_size_after_inlining (1, edge->caller,
						     edge->callee)
		- edge->caller->global.size);

      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s with %i size\n",
		   cgraph_node_name (edge->callee),
		   edge->callee->global.size);
	  fprintf (dump_file,
		   " to be inlined into %s in %s:%i\n"
		   " Estimated growth after inlined into all callees is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
		   cgraph_estimate_growth (edge->callee),
		   badness,
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file, " Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    cgraph_edge_badness (edge, true);
	}

      /* When not having profile info ready, we don't weight in any way the
	 position of the call in the procedure itself.  This means that if a
	 call of function A from function B seems profitable to inline, the
	 recursive call of function A in the inline copy of A in B will look
	 profitable too, and we end up inlining until reaching the maximal
	 function growth.  This is not a good idea, so prohibit the
	 recursive inlining.

	 ??? When the frequencies are taken into account we might not need
	 this restriction.

	 We need to be careful here; in some testcases, e.g. directives.c in
	 libcpp, we can estimate a self-recursive function to have negative
	 growth for inlining completely.  */
      if (!edge->count)
	{
	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->decl == edge->callee->decl)
		break;
	      where = where->callers->caller;
	    }
	  if (where->global.inlined_to)
	    {
	      edge->inline_failed
		= (edge->callee->local.disregard_inline_limits
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      if (dump_file)
		fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
	      continue;
	    }
	}

      if (!cgraph_maybe_hot_edge_p (edge))
	not_good = CIF_UNLIKELY_CALL;
      if (!flag_inline_functions
	  && !DECL_DECLARED_INLINE_P (edge->callee->decl))
	not_good = CIF_NOT_DECLARED_INLINED;
      if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (edge->caller->decl)))
	not_good = CIF_OPTIMIZING_FOR_SIZE;
      if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
	{
	  if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
					    &edge->inline_failed))
	    {
	      edge->inline_failed = not_good;
	      if (dump_file)
		fprintf (dump_file, " inline_failed:%s.\n",
			 cgraph_inline_failed_string (edge->inline_failed));
	    }
	  continue;
	}
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
	{
	  if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
					    &edge->inline_failed))
	    {
	      if (dump_file)
		fprintf (dump_file, " inline_failed:%s.\n",
			 cgraph_inline_failed_string (edge->inline_failed));
	    }
	  continue;
	}
      if (!tree_can_inline_p (edge))
	{
	  if (dump_file)
	    fprintf (dump_file, " inline_failed:%s.\n",
		     cgraph_inline_failed_string (edge->inline_failed));
	  continue;
	}
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
				       &edge->inline_failed))
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!cgraph_decide_recursive_inlining (where,
						 flag_indirect_inlining
						 ? &new_indirect_edges : NULL))
	    continue;
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);
	  update_callee_keys (heap, where, updated_nodes);
	}
      else
	{
	  struct cgraph_node *callee;
	  if (edge->call_stmt_cannot_inline_p
	      || !cgraph_check_inline_limits (edge->caller, edge->callee,
					      &edge->inline_failed, true))
	    {
	      if (dump_file)
		fprintf (dump_file, " Not inlining into %s:%s.\n",
			 cgraph_node_name (edge->caller),
			 cgraph_inline_failed_string (edge->inline_failed));
	      continue;
	    }
	  callee = edge->callee;
	  cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);

	  update_callee_keys (heap, callee, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as the number of inlinable calls and the size of the function
	 body.  After inlining these properties might change for the function
	 we inlined into (since its body size changed) and for the functions
	 called by the function we inlined (since the number of their
	 inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);

      /* We removed one call of the function we just inlined.  If the offline
	 copy is still needed, be sure to update the keys.  */
      if (callee != where && !callee->global.inlined_to)
	update_caller_keys (heap, callee, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
	{
	  fprintf (dump_file,
		   " Inlined into %s which now has size %i and self time %i,"
		   " net change of %+i.\n",
		   cgraph_node_name (edge->caller),
		   edge->caller->global.size,
		   edge->caller->global.time,
		   overall_size - old_size);
	}
      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }
  while (!fibheap_empty (heap))
    {
      int badness = fibheap_min_key (heap);

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;
#ifdef ENABLE_CHECKING
      gcc_assert (cgraph_edge_badness (edge, false) == badness);
#endif
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nSkipping %s with %i size\n",
		   cgraph_node_name (edge->callee),
		   edge->callee->global.size);
	  fprintf (dump_file,
		   " called by %s in %s:%i\n"
		   " Estimated growth after inlined into all callees is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
		   cgraph_estimate_growth (edge->callee),
		   badness,
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file, " Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    cgraph_edge_badness (edge, true);
	}
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
	  && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
					   &edge->inline_failed))
	edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
    }

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}
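
/* A note on the cap ratcheting above: max_size is recomputed from
   min_size, the smallest unit size seen so far, so a sequence of
   shrinking inlines lowers the allowed ceiling rather than letting
   earlier growth buy room for later growth.  For example, with a 30%
   growth allowance, a unit that starts at 1000 and shrinks to 900 may
   thereafter grow only to 1170, not 1300.  */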
/* Flatten NODE from the IPA inliner.  */

static void
cgraph_flatten (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *)(size_t) INLINE_ALL;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;

      if (e->call_stmt_cannot_inline_p)
	continue;

      if (!e->callee->analyzed)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining: Function body not available.\n");
	  continue;
	}

      /* We've hit a cycle?  It is time to give up.  */
      if (e->callee->aux)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining %s into %s to avoid cycle.\n",
		     cgraph_node_name (e->callee),
		     cgraph_node_name (e->caller));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  cgraph_flatten (e->callee);
	  continue;
	}

      if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: recursive call.\n");
	  continue;
	}

      if (!tree_can_inline_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: %s",
		     cgraph_inline_failed_string (e->inline_failed));
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 cgraph_node_name (e->callee),
		 cgraph_node_name (e->caller));
      orig_callee = e->callee;
      cgraph_mark_inline_edge (e, true, NULL);
      if (e->callee != orig_callee)
	orig_callee->aux = (void *)(size_t) INLINE_ALL;
      cgraph_flatten (e->callee);
      if (e->callee != orig_callee)
	orig_callee->aux = NULL;
    }

  node->aux = NULL;
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_size = 0;
  int i;
  int initial_size = 0;

  cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
  if (in_lto_p && flag_indirect_inlining)
    ipa_update_after_lto_read ();

  max_count = 0;
  max_benefit = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      {
	struct cgraph_edge *e;

	gcc_assert (inline_summary (node)->self_size == node->global.size);
	initial_size += node->global.size;
	for (e = node->callees; e; e = e->next_callee)
	  if (max_count < e->count)
	    max_count = e->count;
	if (max_benefit < inline_summary (node)->time_inlining_benefit)
	  max_benefit = inline_summary (node)->time_inlining_benefit;
      }
  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));
  overall_size = initial_size;

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining.  Starting with size %i.\n",
	     initial_size);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened, but don't update overall unit
	 size.  Calling the incremental inliner here is lame,
	 a simple worklist should be enough.  What should be left
	 here from the early inliner (if it runs) is cyclic cases.
	 Ideally when processing callees we stop inlining at the
	 entry of cycles, possibly cloning that entry point and
	 try to flatten itself turning it into a self-recursive
	 function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", cgraph_node_name (node));
	  cgraph_flatten (node);
	}
    }

  cgraph_decide_inlining_of_small_functions ();

  if (flag_inline_functions_called_once)
    {
      if (dump_file)
	fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
	{
	  node = order[i];

	  if (node->callers
	      && !node->callers->next_caller
	      && cgraph_only_called_directly_p (node)
	      && node->local.inlinable
	      && node->callers->inline_failed
	      && node->callers->caller != node
	      && node->callers->caller->global.inlined_to != node
	      && !node->callers->call_stmt_cannot_inline_p
	      && !DECL_EXTERNAL (node->decl)
	      && !DECL_COMDAT (node->decl))
	    {
	      cgraph_inline_failed_t reason;
	      old_size = overall_size;
	      if (dump_file)
		{
		  fprintf (dump_file,
			   "\nConsidering %s size %i.\n",
			   cgraph_node_name (node), node->global.size);
		  fprintf (dump_file,
			   " Called once from %s %i insns.\n",
			   cgraph_node_name (node->callers->caller),
			   node->callers->caller->global.size);
		}

	      if (cgraph_check_inline_limits (node->callers->caller, node,
					      &reason, false))
		{
		  struct cgraph_node *caller = node->callers->caller;
		  cgraph_mark_inline (node->callers);
		  if (dump_file)
		    fprintf (dump_file,
			     " Inlined into %s which now has %i size"
			     " for a net change of %+i size.\n",
			     cgraph_node_name (caller),
			     caller->global.size,
			     overall_size - old_size);
		}
	      else
		{
		  if (dump_file)
		    fprintf (dump_file,
			     " Not inlining: %s.\n",
			     cgraph_inline_failed_string (reason));
		}
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    free_all_ipa_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
	     "\nInlined %i calls, eliminated %i functions, "
	     "size %i turned to %i size.\n\n",
	     ncalls_inlined, nfunctions_inlined, initial_size,
	     overall_size);
  free (order);
  return 0;
}
/* Return true when N is a leaf function.  Accept cheap (pure&const)
   builtins in leaf functions.  */
static bool
leaf_node_p (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!DECL_BUILT_IN (e->callee->decl)
	|| (!TREE_READONLY (e->callee->decl)
	    || DECL_PURE_P (e->callee->decl)))
      return false;
  return true;
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
				      enum inlining_mode mode)
{
  struct cgraph_edge *e;
  bool inlined = false;
  cgraph_inline_failed_t failed_reason;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
	fprintf (dump_file, "Incrementally flattening %s\n",
		 cgraph_node_name (node));
      mode = INLINE_ALL;
    }

  /* First of all look for always-inline functions.  */
  if (mode != INLINE_SIZE_NORECURSIVE)
    for (e = node->callees; e; e = e->next_callee)
      {
	if (!e->callee->local.disregard_inline_limits
	    && (mode != INLINE_ALL || !e->callee->local.inlinable))
	  continue;
	if (e->call_stmt_cannot_inline_p)
	  continue;
	if (dump_file)
	  fprintf (dump_file,
		   "Considering to always inline inline candidate %s.\n",
		   cgraph_node_name (e->callee));
	if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
	  {
	    if (dump_file)
	      fprintf (dump_file, "Not inlining: recursive call.\n");
	    continue;
	  }
	if (!tree_can_inline_p (e))
	  {
	    if (dump_file)
	      fprintf (dump_file,
		       "Not inlining: %s",
		       cgraph_inline_failed_string (e->inline_failed));
	    continue;
	  }
	if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	    != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
	  {
	    if (dump_file)
	      fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	    continue;
	  }
	if (!e->callee->analyzed)
	  {
	    if (dump_file)
	      fprintf (dump_file,
		       "Not inlining: Function body no longer available.\n");
	    continue;
	  }

	if (dump_file)
	  fprintf (dump_file, " Inlining %s into %s.\n",
		   cgraph_node_name (e->callee),
		   cgraph_node_name (e->caller));
	cgraph_mark_inline (e);
	inlined = true;
      }

  /* Now do the automatic inlining.  */
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  */
      && !node->local.disregard_inline_limits)
    {
      bitmap visited = BITMAP_ALLOC (NULL);
      for (e = node->callees; e; e = e->next_callee)
	{
	  int allowed_growth = 0;
	  if (!e->callee->local.inlinable
	      || !e->inline_failed
	      || e->callee->local.disregard_inline_limits)
	    continue;
	  /* We are inlining a function to all call-sites in node
	     or to none.  So visit each candidate only once.  */
	  if (!bitmap_set_bit (visited, e->callee->uid))
	    continue;
	  if (dump_file)
	    fprintf (dump_file, "Considering inline candidate %s.\n",
		     cgraph_node_name (e->callee));
	  if (cgraph_recursive_inlining_p (node, e->callee,
					   &e->inline_failed))
	    {
	      if (dump_file)
		fprintf (dump_file, "Not inlining: recursive call.\n");
	      continue;
	    }
	  if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	      != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Not inlining: SSA form does not match.\n");
	      continue;
	    }

	  if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
	      && optimize_function_for_speed_p (cfun))
	    allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);

	  /* When the function body would grow and inlining the function
	     won't eliminate the need for an offline copy of the function,
	     don't inline.  */
	  if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
	       || (!flag_inline_functions
		   && !DECL_DECLARED_INLINE_P (e->callee->decl)))
	      && (cgraph_estimate_size_after_inlining (1, e->caller,
						       e->callee)
		  > e->caller->global.size + allowed_growth)
	      && cgraph_estimate_growth (e->callee) > allowed_growth)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Not inlining: code size would grow by %i.\n",
			 cgraph_estimate_size_after_inlining (1, e->caller,
							      e->callee)
			 - e->caller->global.size);
	      continue;
	    }
	  if (!cgraph_check_inline_limits (node, e->callee,
					   &e->inline_failed, false)
	      || e->call_stmt_cannot_inline_p)
	    {
	      if (dump_file)
		fprintf (dump_file, "Not inlining: %s.\n",
			 cgraph_inline_failed_string (e->inline_failed));
	      continue;
	    }
	  if (!e->callee->analyzed)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Not inlining: Function body no longer available.\n");
	      continue;
	    }
	  if (!tree_can_inline_p (e))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Not inlining: %s.",
			 cgraph_inline_failed_string (e->inline_failed));
	      continue;
	    }
	  if (cgraph_default_inline_p (e->callee, &failed_reason))
	    {
	      if (dump_file)
		fprintf (dump_file, " Inlining %s into %s.\n",
			 cgraph_node_name (e->callee),
			 cgraph_node_name (e->caller));
	      cgraph_mark_inline (e);
	      inlined = true;
	    }
	}
      BITMAP_FREE (visited);
    }
  return inlined;
}
/* Because inlining might remove no-longer-reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   later real inlining pass for testcases with very many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;
  int iterations = 0;

  if (sorrycount || errorcount)
    return 0;

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining)
    {
      /* When not optimizing or not inlining, inline only always-inline
	 functions.  */
      cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  else
    {
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", cgraph_node_name (node));
	  cgraph_flatten (node);
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);
	  timevar_pop (TV_INTEGRATION);
	}
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && cgraph_decide_inlining_incrementally (node,
						      iterations
						      ? INLINE_SIZE_NORECURSIVE
						      : INLINE_SIZE))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);
	  iterations++;
	  timevar_pop (TV_INTEGRATION);
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}
struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  NULL,					/* gate */
  cgraph_early_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
/* When inlining shall be performed.  */
static bool
cgraph_gate_ipa_early_inlining (void)
{
  return (flag_early_inlining
	  && !in_lto_p
	  && (flag_branch_probabilities || flag_test_coverage
	      || profile_arc_flag));
}
/* IPA pass wrapper for the early inlining pass.  We need to run early
   inlining before tree profiling, so we have a stand-alone IPA pass for
   doing so.  */
struct simple_ipa_opt_pass pass_ipa_early_inline =
{
 {
  SIMPLE_IPA_PASS,
  "einline_ipa",			/* name */
  cgraph_gate_ipa_early_inlining,	/* gate */
  NULL,					/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_cgraph			/* todo_flags_finish */
 }
};
/* See if a statement might disappear after inlining.  We are not terribly
   sophisticated, basically looking for simple abstraction penalty
   wrappers.  */

static bool
likely_eliminated_by_inlining_p (gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  switch (code)
    {
      case GIMPLE_RETURN:
	return true;
      case GIMPLE_ASSIGN:
	if (gimple_num_ops (stmt) != 2)
	  return false;

	/* Casts of parameters, loads from parameters passed by reference
	   and stores to return value or parameters are probably free after
	   inlining.  */
	if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
	    || gimple_assign_rhs_code (stmt) == NOP_EXPR
	    || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
	    || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
	  {
	    tree rhs = gimple_assign_rhs1 (stmt);
	    tree lhs = gimple_assign_lhs (stmt);
	    tree inner_rhs = rhs;
	    tree inner_lhs = lhs;
	    bool rhs_free = false;
	    bool lhs_free = false;

	    while (handled_component_p (inner_lhs)
		   || TREE_CODE (inner_lhs) == INDIRECT_REF)
	      inner_lhs = TREE_OPERAND (inner_lhs, 0);
	    while (handled_component_p (inner_rhs)
		   || TREE_CODE (inner_rhs) == ADDR_EXPR
		   || TREE_CODE (inner_rhs) == INDIRECT_REF)
	      inner_rhs = TREE_OPERAND (inner_rhs, 0);

	    if (TREE_CODE (inner_rhs) == PARM_DECL
		|| (TREE_CODE (inner_rhs) == SSA_NAME
		    && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
		    && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
	      rhs_free = true;
	    if (rhs_free && is_gimple_reg (lhs))
	      lhs_free = true;
	    if (((TREE_CODE (inner_lhs) == PARM_DECL
		  || (TREE_CODE (inner_lhs) == SSA_NAME
		      && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
		      && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
		 && inner_lhs != lhs)
		|| TREE_CODE (inner_lhs) == RESULT_DECL
		|| (TREE_CODE (inner_lhs) == SSA_NAME
		    && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
	      lhs_free = true;
	    if (lhs_free
		&& (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
	      rhs_free = true;
	    if (lhs_free && rhs_free)
	      return true;
	  }
	return false;
      default:
	return false;
    }
}
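
/* An illustration with a made-up wrapper (the names here are purely
   hypothetical):

     int wrap (struct box *b) { return unwrap (b->inner); }

   The load through the parameter B and the store to the return value are
   the kind of statements classified as likely eliminated above, which is
   what lets the heuristics treat such abstraction wrappers as nearly
   free to inline.  */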
1820 /* Compute function body size parameters for NODE. */
static void
estimate_function_body_sizes (struct cgraph_node *node)
{
  gcov_type time = 0;
  gcov_type time_inlining_benefit = 0;
  int size = 0;
  int size_inlining_benefit = 0;
  basic_block bb;
  gimple_stmt_iterator bsi;
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  tree arg;
  int freq;
  tree funtype = TREE_TYPE (node->decl);

  if (node->local.disregard_inline_limits)
    {
      inline_summary (node)->self_time = 0;
      inline_summary (node)->self_size = 0;
      inline_summary (node)->time_inlining_benefit = 0;
      inline_summary (node)->size_inlining_benefit = 0;
    }

  if (dump_file)
    fprintf (dump_file, "Analyzing function body size: %s\n",
             cgraph_node_name (node));

  gcc_assert (my_function && my_function->cfg);
  FOR_EACH_BB_FN (bb, my_function)
    {
      freq = compute_call_stmt_bb_frequency (node->decl, bb);
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple stmt = gsi_stmt (bsi);
          int this_size = estimate_num_insns (stmt, &eni_size_weights);
          int this_time = estimate_num_insns (stmt, &eni_time_weights);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  freq:%6i size:%3i time:%3i ",
                       freq, this_size, this_time);
              print_gimple_stmt (dump_file, stmt, 0, 0);
            }
          this_time *= freq;
          time += this_time;
          size += this_size;
          if (likely_eliminated_by_inlining_p (stmt))
            {
              size_inlining_benefit += this_size;
              time_inlining_benefit += this_time;
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "    Likely eliminated\n");
            }
          gcc_assert (time >= 0);
          gcc_assert (size >= 0);
        }
    }
  time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
  time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
                           / CGRAPH_FREQ_BASE);
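
  /* Worked example of the scaling above, assuming the usual
     CGRAPH_FREQ_BASE of 1000: a statement with this_time == 2 in a block
     executed 1.5 times per invocation has freq == 1500 and contributed
     2 * 1500 == 3000 to TIME.  The CGRAPH_FREQ_BASE / 2 bias makes the
     division round to nearest, so the total scales back to
     (3000 + 500) / 1000 == 3 time units, comparable with SIZE again.  */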
  if (dump_file)
    fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
             (int)time, (int)time_inlining_benefit,
             size, size_inlining_benefit);
  time_inlining_benefit += eni_time_weights.call_cost;
  size_inlining_benefit += eni_size_weights.call_cost;
  if (!VOID_TYPE_P (TREE_TYPE (funtype)))
    {
      int cost = estimate_move_cost (TREE_TYPE (funtype));
      time_inlining_benefit += cost;
      size_inlining_benefit += cost;
    }
  for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
    if (!VOID_TYPE_P (TREE_TYPE (arg)))
      {
        int cost = estimate_move_cost (TREE_TYPE (arg));
        time_inlining_benefit += cost;
        size_inlining_benefit += cost;
      }
  if (time_inlining_benefit > MAX_TIME)
    time_inlining_benefit = MAX_TIME;
  if (time > MAX_TIME)
    time = MAX_TIME;
  inline_summary (node)->self_time = time;
  inline_summary (node)->self_size = size;
  if (dump_file)
    fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
             (int)time, (int)time_inlining_benefit,
             size, size_inlining_benefit);
  inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
  inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
}
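
/* A sketch of the call-overhead accounting above, with made-up costs
   (the real values come from the eni weights and the target): for

       int add (int a, int b);

   the benefit of removing one call is call_cost plus
   estimate_move_cost for the returned int and for each of the two
   int arguments.  Assuming call_cost == 12 and a move cost of 1 per
   int, both inlining benefits grow by 12 + 1 + 1 + 1 == 15 on top of
   whatever the body scan accumulated.  */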
/* Compute parameters of functions used by the inliner.  */
unsigned int
compute_inline_parameters (struct cgraph_node *node)
{
  HOST_WIDE_INT self_stack_size;

  gcc_assert (!node->global.inlined_to);

  /* Estimate the stack size for the function, but not at -O0,
     because estimated_stack_frame_size is a quadratic problem.  */
  self_stack_size = optimize ? estimated_stack_frame_size () : 0;
  inline_summary (node)->estimated_self_stack_size = self_stack_size;
  node->global.estimated_stack_size = self_stack_size;
  node->global.stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  node->local.inlinable = tree_inlinable_function_p (node->decl);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (node->decl);
  estimate_function_body_sizes (node);
  /* Inlining characteristics are maintained by cgraph_mark_inline.  */
  node->global.time = inline_summary (node)->self_time;
  node->global.size = inline_summary (node)->self_size;
  return 0;
}
/* Compute parameters of functions used by the inliner using
   current_function_decl.  */
static unsigned int
compute_inline_parameters_for_current (void)
{
  compute_inline_parameters (cgraph_node (current_function_decl));
  return 0;
}
struct gimple_opt_pass pass_inline_parameters =
{
 {
  GIMPLE_PASS,
  "inline_param",                       /* name */
  NULL,                                 /* gate */
  compute_inline_parameters_for_current,/* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0                                     /* todo_flags_finish */
 }
};
/* This function performs intraprocedural analysis in NODE that is required
   to inline indirect calls.  */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  struct cgraph_edge *cs;

  if (!flag_ipa_cp)
    {
      ipa_initialize_node_params (node);
      ipa_detect_param_modifications (node);
    }
  ipa_analyze_params_uses (node);

  if (!flag_ipa_cp)
    for (cs = node->callees; cs; cs = cs->next_callee)
      {
        ipa_count_arguments (cs);
        ipa_compute_jump_functions (cs);
      }

  if (dump_file)
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}
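
/* For illustration (hypothetical call site): given a callee edge for

       bar (a_1(D), 5);

   where a_1(D) is the default definition of parameter A, ipa-prop
   records a pass-through jump function for the first argument and a
   constant jump function for the second.  These records are shared
   with ipa-cp, which is why the !flag_ipa_cp guards above avoid
   computing them a second time.  */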
/* Analyze the function body: compute inline parameters and, when indirect
   inlining is enabled, the information needed to inline indirect calls.  */
static void
analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  current_function_decl = node->decl;

  compute_inline_parameters (node);
  if (flag_indirect_inlining)
    inline_indirect_intraprocedural_analysis (node);

  current_function_decl = NULL;
  pop_cfun ();
}
/* Called when a new function is inserted into the callgraph late.  */
static void
add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  analyze_function (node);
}
/* Generate inline summaries for all analyzed functions in the callgraph.  */
static void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);

  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      ipa_check_create_node_params ();
      ipa_check_create_edge_args ();
    }

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      analyze_function (node);
}
/* Apply the inline plan to the function.  */
static unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;

  /* FIXME: Currently the pass manager adds the inline transform more than
     once to some clones.  This needs revisiting after WPA cleanups.  */
  if (cfun->after_inlining)
    return 0;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (node->decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    if (!e->inline_failed || warn_inline)
      break;

  if (e)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  return todo | execute_fixup_cfg ();
}
/* Read the inline summary.  Jump functions are shared among ipa-cp
   and the inliner, so when ipa-cp is active, we don't need to read them
   twice.  */

static void
inline_read_summary (void)
{
  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      if (!flag_ipa_cp)
        ipa_prop_read_jump_functions ();
    }
  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);
}
/* Write the inline summary for nodes in SET.
   Jump functions are shared among ipa-cp and the inliner, so when ipa-cp
   is active, we don't need to write them twice.  */

static void
inline_write_summary (cgraph_node_set set)
{
  if (flag_indirect_inlining && !flag_ipa_cp)
    ipa_prop_write_jump_functions (set);
}
/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.  */

static bool
gate_cgraph_decide_inlining (void)
{
  /* ???  We'd like to skip this if not optimizing or not inlining, as
     all always-inline functions have been processed by early inlining
     already.  But skipping it at least breaks EH with C++, as we need
     to run fixup_cfg unconditionally even at -O0.  So leave the pass
     enabled unconditionally for now.  */
  return true;
}
struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",                             /* name */
  gate_cgraph_decide_inlining,          /* gate */
  cgraph_decide_inlining,               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_remove_functions,                /* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions               /* todo_flags_finish */
 },
 inline_generate_summary,               /* generate_summary */
 inline_write_summary,                  /* write_summary */
 inline_read_summary,                   /* read_summary */
 NULL,                                  /* write_optimization_summary */
 NULL,                                  /* read_optimization_summary */
 lto_ipa_fixup_call_notes,              /* stmt_fixup */
 0,                                     /* TODOs */
 inline_transform,                      /* function_transform */
 NULL                                   /* variable_transform */
};
#include "gt-ipa-inline.h"