/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation about the particular representation of inline plans in
   the callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks a given call inline and performs the necessary
     modifications of the cgraph (production of the clones and updating
     of overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass aiming to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of the particular heuristics is separated from
     the rest of the code to make it easier to replace it with a more
     complicated implementation in the future.  The rest of the inlining
     code acts as a library aimed to modify the callgraph and verify that
     the parameters on code size growth fit.

     To mark a given call inline, use the cgraph_mark_inline function;
     the verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm ordering
     all functions by their "profitability" (estimated by code size growth)
     and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used by the early
     inliner.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes local properties of functions that are used by
     the inliner: estimated function body size, whether the function is
     inlinable at all, and stack frame consumption.

     Before executing any of the inliner passes, this local pass has to
     be applied to each function in the callgraph (ie run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied on the function body.

   pass_early_inlining

     Simple local inlining pass inlining callees into the current
     function.  This pass makes no global whole compilation unit
     analysis, and thus, when allowed to do inlining that expands code
     size, it might result in unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (that is maintained by
     the pass manager).  The functions after inlining are early
     optimized, so the early inliner sees unoptimized functions
     themselves, but all considered callees are already optimized,
     allowing it to unfold abstraction penalty on C++ effectively and
     cheaply.

   pass_ipa_early_inlining

     With profiling, the early inlining is also necessary to reduce
     instrumentation costs on programs with high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought
     in via inlining.

     To avoid this, this pass is executed as an IPA pass before
     profiling.  It is a simple wrapper around pass_early_inlining and
     ensures the first inlining.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm to do
     inlining of small functions that results in overall growth of the
     compilation unit, and inlining of functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph) and, unlike early inlining, it
     does not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline
     on a given function.  Possibly the function body before inlining is
     saved when it is needed for further inlining later.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
#include "ipa-prop.h"
#include "except.h"

#define MAX_TIME 1000000000

/* Modes the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are
   inlined.  This mode is used after detecting a cycle during
   flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE_NORECURSIVE,
  INLINE_SIZE,
  INLINE_ALL
};

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode);
static void cgraph_flatten (struct cgraph_node *node);

/* Statistics we collect about inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_size;
static gcov_type max_count, max_benefit;

/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;

static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
  return &node->local.inline_summary;
}

/* Estimate self time of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  gcov_type time = (((gcov_type)what->global.time
                     - inline_summary (what)->time_inlining_benefit)
                    * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
                   + to->global.time;
  if (time < 0)
    time = 0;
  if (time > MAX_TIME)
    time = MAX_TIME;
  return time;
}

/* Estimate self size of the function after inlining WHAT into TO.  */

static inline int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
              * times + to->global.size);
  gcc_assert (size >= 0);
  return size;
}

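/* Worked example (illustrative only; the numbers are hypothetical): with
   TIMES == 1, a callee of size 25 whose size_inlining_benefit is 5
   inlined into a caller of size 40 yields (25 - 5) * 1 + 40 == 60 size
   units, i.e. the callee body minus the code expected to be eliminated
   by inlining, added once per inlined call.  The time estimate above is
   analogous but additionally scales the callee's net time by the call
   frequency.  */
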
/* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
   by NEST.  */

static void
update_noncloned_frequencies (struct cgraph_node *node,
                              int freq_scale, int nest)
{
  struct cgraph_edge *e;

  /* We do not want to ignore high loop nest after freq drops to 0.  */
  if (!freq_scale)
    freq_scale = 1;
  for (e = node->callees; e; e = e->next_callee)
    {
      e->loop_nest += nest;
      e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
      if (e->frequency > CGRAPH_FREQ_MAX)
        e->frequency = CGRAPH_FREQ_MAX;
      if (!e->inline_failed)
        update_noncloned_frequencies (e->callee, freq_scale, nest);
    }
}

/* E is expected to be an edge being inlined.  Clone destination node of
   the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating new
   clones or re-using node originally representing out-of-line function call.  */

void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
{
  HOST_WIDE_INT peak;

  if (duplicate)
    {
      /* We may eliminate the need for out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && cgraph_can_remove_if_no_direct_calls_p (e->callee)
          /* Inlining might enable more devirtualizing, so we want to remove
             those only after all devirtualizable virtual calls are processed.
             Lacking may edges in callgraph we just preserve them post
             inlining.  */
          && (!DECL_VIRTUAL_P (e->callee->decl)
              || (!DECL_COMDAT (e->callee->decl) && !DECL_EXTERNAL (e->callee->decl)))
          /* Don't reuse if more than one function shares a comdat group.
             If the other function(s) are needed, we need to emit even
             this function out of line.  */
          && !e->callee->same_comdat_group
          && !cgraph_new_nodes)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (e->callee->analyzed)
            {
              overall_size -= e->callee->global.size;
              nfunctions_inlined++;
            }
          duplicate = false;
          e->callee->local.externally_visible = false;
          update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->callee->decl,
                                 e->count, e->frequency, e->loop_nest,
                                 update_original, NULL);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
         + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;
  cgraph_propagate_frequency (e->callee);

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}

/* Mark edge E as inlined and update callgraph accordingly.  UPDATE_ORIGINAL
   specifies whether the profile of the original function should be updated.
   If any new indirect edges are discovered in the process, add them to
   NEW_EDGES, unless it is NULL.  Return true iff any new callgraph edges
   were discovered as a result of inlining.  */

static bool
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
                         VEC (cgraph_edge_p, heap) **new_edges)
{
  int old_size = 0, new_size = 0;
  struct cgraph_node *to = NULL, *what;
  struct cgraph_edge *curr = e;
  int freq;

  gcc_assert (e->inline_failed);
  e->inline_failed = CIF_OK;
  DECL_POSSIBLY_INLINED (e->callee->decl) = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  freq = e->frequency;
  /* Now update size of caller and all functions caller is inlined into.  */
  for (;e && !e->inline_failed; e = e->caller->callers)
    {
      to = e->caller;
      old_size = e->caller->global.size;
      new_size = cgraph_estimate_size_after_inlining (1, to, what);
      to->global.size = new_size;
      to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_size > old_size)
    overall_size += new_size - old_size;
  ncalls_inlined++;

  /* FIXME: We should remove the optimize check after we ensure we never run
     IPA passes when not optimizing.  */
  if (flag_indirect_inlining && optimize)
    return ipa_propagate_indirect_call_infos (curr, new_edges);
  else
    return false;
}

/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.  */

static void
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!edge->call_stmt_cannot_inline_p);
  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true, NULL);
          if (e == edge)
            edge = next;
        }
    }
}

/* Estimate the growth caused by inlining NODE into all callers.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  bool self_recursive = false;

  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->caller == node)
        self_recursive = true;
      if (e->inline_failed)
        growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                   - e->caller->global.size);
    }

  /* ??? Wrong for non-trivially self-recursive functions or cases where
     we decide to not inline for different reasons, but it is not a big
     deal as in that case we will keep the body around, but we will also
     avoid some inlining.  */
  if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
      && !DECL_EXTERNAL (node->decl) && !self_recursive)
    growth -= node->global.size;

  node->global.estimated_growth = growth;
  return growth;
}

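/* Worked example (illustrative only; the numbers are hypothetical): a
   node of size 30 with size_inlining_benefit 10 and two not-yet-inlined
   caller edges contributes (30 - 10) == 20 units of growth per call
   site, i.e. growth == 40; if the offline copy can then be removed, the
   node's own size (30) is subtracted, giving an overall estimate of 10.  */
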
/* Return false when inlining WHAT into TO is not a good idea
   as it would cause too large growth of function bodies.
   When ONE_ONLY is true, assume that only one call site is going
   to be inlined, otherwise figure out how many call sites in
   TO call WHAT and verify that all can be inlined.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            cgraph_inline_failed_t *reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining large function body called once into small function,
     take the inlined function as base for limiting the growth.  */
  if (inline_summary (to)->self_size > inline_summary(what)->self_size)
    limit = inline_summary (to)->self_size;
  else
    limit = inline_summary (what)->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
                   + inline_summary (to)->estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
        *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

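/* Worked example (illustrative only; assumes the default
   --param large-function-growth=100): inlining a callee of self_size 80
   into a caller of self_size 500 gives limit = 500 + 500 * 100 / 100
   == 1000, so the inlining is refused only if the estimated new size
   exceeds both this limit and --param large-function-insns.  */
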
/* Return true when function N is small enough to be inlined.  */

static bool
cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
{
  tree decl = n->decl;

  if (n->local.disregard_inline_limits)
    return true;

  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
        *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      return false;
    }
  if (!n->analyzed)
    {
      if (reason)
        *reason = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  if (cgraph_function_body_availability (n) <= AVAIL_OVERWRITABLE)
    {
      if (reason)
        *reason = CIF_OVERWRITABLE;
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          return false;
        }
    }
  else
    {
      if (n->global.size >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
          return false;
        }
    }

  return true;
}

/* Return true when inlining WHAT would create recursive inlining.
   We call it recursive inlining in all cases where the same function
   appears more than once in a single recursion nest path in the inline
   graph.  */

static inline bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             cgraph_inline_failed_t *reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
  return recursive;
}

/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable callers
   of the function or function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth =
    (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
     - edge->caller->global.size);

  if (edge->callee->local.disregard_inline_limits)
    return INT_MIN;

  if (dump)
    {
      fprintf (dump_file, " Badness calculation for %s -> %s\n",
               cgraph_node_name (edge->caller),
               cgraph_node_name (edge->callee));
      fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
               growth,
               edge->callee->global.time,
               inline_summary (edge->callee)->time_inlining_benefit,
               edge->callee->global.size,
               inline_summary (edge->callee)->size_inlining_benefit);
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN - growth;
      if (dump)
        fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
                 growth);
    }

  /* When profiling is available, base priorities on -(#calls / growth).
     So we optimize for overall number of "executed" inlined calls.  */
  else if (max_count)
    {
      badness =
        ((int)
         ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
         (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
      if (dump)
        {
          fprintf (dump_file,
                   " %i (relative %f): profile info. Relative count %f"
                   " * Relative benefit %f\n",
                   (int) badness, (double) badness / INT_MIN,
                   (double) edge->count / max_count,
                   (double) (inline_summary (edge->callee)->
                             time_inlining_benefit + 1) / (max_benefit + 1));
        }
    }

  /* When function local profile is available, base priorities on
     growth / frequency, so we optimize for overall frequency of inlined
     calls.  This is not too accurate since while the call might be frequent
     within function, the function itself is infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so functions called
     fewer times but of the same size get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
      int benefitperc;
      int growth_for_all;
      badness = growth * 10000;
      benefitperc =
        MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
             (edge->callee->global.time + 1) + 1, 100);
      div *= benefitperc;

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 10000)
        div = 10000 + ceil_log2 (div) - 8;
      if (div < 1)
        div = 1;
      if (badness > 0)
        badness /= div;
      growth_for_all = cgraph_estimate_growth (edge->callee);
      badness += growth_for_all;
      if (badness > INT_MAX)
        badness = INT_MAX;
      if (dump)
        {
          fprintf (dump_file,
                   " %i: guessed profile. frequency %i, overall growth %i,"
                   " benefit %i%%, divisor %i\n",
                   (int) badness, edge->frequency, growth_for_all,
                   benefitperc, div);
        }
    }
  /* When function local profile is not available or it does not give
     useful information (ie frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        {
          badness <<= nest;
        }
      if (dump)
        fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
                 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}

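/* Worked example (illustrative only; the numbers are hypothetical): in
   the guessed-profile branch above, a call with growth 20, frequency
   equal to CGRAPH_FREQ_BASE (div = 101) and a 50% time benefit
   (benefitperc = 51) gets badness = 20 * 10000 / (101 * 51) == 38 before
   the estimated overall growth of the callee is added, so hotter and
   more beneficial calls end up with smaller keys in the heap.  */
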
/* Recompute badness of EDGE and update its key in HEAP if needed.  */
static void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = cgraph_edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key only decreases the keys.
         When we increase the key we do not update the heap
         and instead re-insert the element once it becomes
         a minimum of the heap.  */
      if (badness < n->key)
        {
          fibheap_replace_key (heap, n, badness);
          gcc_checking_assert (n->key == badness);
        }
    }
  else
    edge->aux = fibheap_insert (heap, badness, edge);
}

/* Recompute heap nodes for each caller edge.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;

  if (!node->local.inlinable
      || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;
  node->global.estimated_growth = INT_MIN;

  /* See if there is something to do.  */
  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      break;
  if (!edge)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      update_edge_key (heap, edge);
}

/* Recompute heap nodes for each uninlined call.
   This is used when we know that edge badnesses are going only to increase
   (we introduced new call site) and thus all we need is to insert newly
   created edges into heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;
  node->global.estimated_growth = INT_MIN;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        if (e->inline_failed
            && e->callee->local.inlinable
            && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
            && !bitmap_bit_p (updated_nodes, e->callee->uid))
          {
            node->global.estimated_growth = INT_MIN;
            /* If function becomes uninlinable, we need to remove it from
               the heap.  */
            if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
              update_caller_keys (heap, e->callee, updated_nodes);
            else
              /* Otherwise update just edge E.  */
              update_edge_key (heap, e);
          }
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}

/* Recompute heap nodes for the caller edges of each of the callees.
   Walk recursively into all inline clones.  */

static void
update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
                        bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;
  node->global.estimated_growth = INT_MIN;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        if (e->inline_failed)
          update_caller_keys (heap, e->callee, updated_nodes);
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}

/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by expected number
           of calls.  Without profile feedback we maintain simple queue
           to order candidates via recursive depths.  */
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node,
                                  VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  /* It does not make sense to recursively inline always-inline functions
     as we are going to sorry() on the remaining calls anyway.  */
  if (node->local.disregard_inline_limits
      && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
    return false;

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that function is small enough to be considered for inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             " Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->decl,
                                    node->count, CGRAPH_FREQ_BASE, 1,
                                    false, NULL);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     " maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         " Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, " Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
             master_clone->global.size, node->global.size,
             master_clone->global.time, node->global.time);

  /* Remove master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the
     master clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces number of calls of the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}

/* Set inline_failed for all callers of given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node,
                          cgraph_inline_failed_t reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n",
             cgraph_inline_failed_string (reason));
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}

/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}

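/* Worked example (illustrative only; assumes the default
   --param inline-unit-growth=30): a unit estimated at 100000 insns may
   grow up to 100000 * (100 + 30) / 100 == 130000 insns, while units
   smaller than --param large-unit-insns are first rounded up to that
   floor before the percentage is applied.  */
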
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */
static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      if (edge->callee->local.inlinable
          && edge->inline_failed
          && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
    }
}

/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap based on estimated
   growth of the overall number of instructions, and then we update the
   estimates.

   INLINED and INLINED_CALLEES are just pointers to arrays large enough
   to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
          }
    }

  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  while (overall_size <= max_size
         && !fibheap_empty (heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (heap);
      int current_badness;
      int growth;
      cgraph_inline_failed_t not_good = CIF_OK;

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* When updating the edge costs, we only decrease badness in the keys.
         When the badness increases, we keep the heap as it is and re-insert
         the key now.  */
      current_badness = cgraph_edge_badness (edge, false);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
        {
          edge->aux = fibheap_insert (heap, current_badness, edge);
          continue;
        }

      callee = edge->callee;

      growth = (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
                - edge->caller->global.size);

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i size\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.size);
          fprintf (dump_file,
                   " to be inlined into %s in %s:%i\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   flag_wpa ? "unknown"
                   : gimple_filename ((const_gimple) edge->call_stmt),
                   flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
                   cgraph_estimate_growth (edge->callee),
                   badness,
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
          if (dump_flags & TDF_DETAILS)
            cgraph_edge_badness (edge, true);
        }

      /* When we don't have profile info ready, we don't weight in any way
         the position of the call in the procedure itself.  This means that
         if a call of function A from function B seems profitable to inline,
         the recursive call of function A in the inline copy of A in B will
         look profitable too, and we end up inlining until reaching maximal
         function growth.  This is not a good idea, so prohibit the
         recursive inlining.

         ??? When the frequencies are taken into account we might not need this
         restriction.

         We need to be careful here; in some testcases, e.g. directives.c in
         libcpp, we can estimate a self-recursive function to have negative
         growth for inlining completely.  */
      if (!edge->count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits
                   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (edge->callee->local.disregard_inline_limits)
        ;
      else if (!cgraph_maybe_hot_edge_p (edge))
        not_good = CIF_UNLIKELY_CALL;
      else if (!flag_inline_functions
               && !DECL_DECLARED_INLINE_P (edge->callee->decl))
        not_good = CIF_NOT_DECLARED_INLINED;
      else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
        not_good = CIF_OPTIMIZING_FOR_SIZE;
      if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed = not_good;
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!tree_can_inline_p (edge))
        {
          if (dump_file)
            fprintf (dump_file, " inline_failed:%s.\n",
                     cgraph_inline_failed_string (edge->inline_failed));
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where,
                                                 flag_indirect_inlining
                                                 ? &new_indirect_edges : NULL))
            continue;
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);
          update_all_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (edge->call_stmt_cannot_inline_p
              || !cgraph_check_inline_limits (edge->caller, edge->callee,
                                              &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller),
                         cgraph_inline_failed_string (edge->inline_failed));
              continue;
            }
          callee = edge->callee;
          gcc_checking_assert (!callee->global.inlined_to);
          cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);

          /* We inlined the last offline copy into the body.  This might lead
             to callees of the function having fewer call sites and thus they
             may need updating.  */
          if (callee->global.inlined_to)
            update_all_callee_keys (heap, callee, updated_nodes);
          else
            update_callee_keys (heap, edge->callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
         such as number of inlinable calls and size of the function body.
         After inlining these properties might change for the function we
         inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);

      /* We removed one call of the function we just inlined.  If offline
         copy is still needed, be sure to update the keys.  */
      if (callee != where && !callee->global.inlined_to)
        update_caller_keys (heap, callee, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has time %i and size %i,"
                   " net change of %+i.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.time,
                   edge->caller->global.size,
                   overall_size - old_size);
        }
      if (min_size > overall_size)
        {
          min_size = overall_size;
          max_size = compute_max_insns (min_size);

          if (dump_file)
            fprintf (dump_file, "New minimal size reached: %i\n", min_size);
        }
    }
  while (!fibheap_empty (heap))
    {
      int badness = fibheap_min_key (heap);

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;
#ifdef ENABLE_CHECKING
      gcc_assert (cgraph_edge_badness (edge, false) >= badness);
#endif
      if (dump_file)
        {
          fprintf (dump_file,
                   "\nSkipping %s with %i size\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.size);
          fprintf (dump_file,
                   " called by %s in %s:%i\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   flag_wpa ? "unknown"
                   : gimple_filename ((const_gimple) edge->call_stmt),
                   flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
                   cgraph_estimate_growth (edge->callee),
                   badness,
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
          if (dump_flags & TDF_DETAILS)
            cgraph_edge_badness (edge, true);
        }
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
    }

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}

/* Flatten NODE from the IPA inliner.  */

static void
cgraph_flatten (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *)(size_t) INLINE_ALL;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;

      if (e->call_stmt_cannot_inline_p)
        continue;

      if (!e->callee->analyzed)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Not inlining: Function body not available.\n");
          continue;
        }

      /* We've hit a cycle?  It is time to give up.  */
      if (e->callee->aux)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Not inlining %s into %s to avoid cycle.\n",
                     cgraph_node_name (e->callee),
                     cgraph_node_name (e->caller));
          e->inline_failed = CIF_RECURSIVE_INLINING;
          continue;
        }

      /* When the edge is already inlined, we just need to recurse into
         it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
        {
          cgraph_flatten (e->callee);
          continue;
        }

      if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
        {
          if (dump_file)
            fprintf (dump_file, "Not inlining: recursive call.\n");
          continue;
        }

      if (!tree_can_inline_p (e))
        {
          if (dump_file)
            fprintf (dump_file, "Not inlining: %s",
                     cgraph_inline_failed_string (e->inline_failed));
          continue;
        }

      /* Inline the edge and flatten the inline clone.  Avoid
         recursing through the original node if the node was cloned.  */
      if (dump_file)
        fprintf (dump_file, " Inlining %s into %s.\n",
                 cgraph_node_name (e->callee),
                 cgraph_node_name (e->caller));
      orig_callee = e->callee;
      cgraph_mark_inline_edge (e, true, NULL);
      if (e->callee != orig_callee)
        orig_callee->aux = (void *)(size_t) INLINE_ALL;
      cgraph_flatten (e->callee);
      if (e->callee != orig_callee)
        orig_callee->aux = NULL;
    }

  node->aux = NULL;
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_size = 0;
  int i;
  int initial_size = 0;

  cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
  if (in_lto_p && flag_indirect_inlining)
    ipa_update_after_lto_read ();
  if (flag_indirect_inlining)
    ipa_create_all_structures_for_iinln ();

  max_count = 0;
  max_benefit = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      {
        struct cgraph_edge *e;

        gcc_assert (inline_summary (node)->self_size == node->global.size);
        initial_size += node->global.size;
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
        if (max_benefit < inline_summary (node)->time_inlining_benefit)
          max_benefit = inline_summary (node)->time_inlining_benefit;
      }
  gcc_assert (in_lto_p
              || !max_count
              || (profile_info && flag_branch_probabilities));
  overall_size = initial_size;

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with size %i.\n",
             initial_size);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened, but don't update overall unit
         size.  Calling the incremental inliner here is lame,
         a simple worklist should be enough.  What should be left
         here from the early inliner (if it runs) is cyclic cases.
         Ideally when processing callees we stop inlining at the
         entry of cycles, possibly cloning that entry point and
         try to flatten itself turning it into a self-recursive
         function.  */
      if (lookup_attribute ("flatten",
                            DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Flattening %s\n", cgraph_node_name (node));
          cgraph_flatten (node);
        }
    }

  cgraph_decide_inlining_of_small_functions ();

  if (flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers
              && !node->callers->next_caller
              && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
              && node->local.inlinable
              && cgraph_function_body_availability (node) >= AVAIL_AVAILABLE
              && node->callers->inline_failed
              && node->callers->caller != node
              && node->callers->caller->global.inlined_to != node
              && !node->callers->call_stmt_cannot_inline_p
              && !DECL_EXTERNAL (node->decl))
            {
              cgraph_inline_failed_t reason;
              old_size = overall_size;
              if (dump_file)
                {
                  fprintf (dump_file,
                           "\nConsidering %s size %i.\n",
                           cgraph_node_name (node), node->global.size);
                  fprintf (dump_file,
                           " Called once from %s %i insns.\n",
                           cgraph_node_name (node->callers->caller),
                           node->callers->caller->global.size);
                }

              if (cgraph_check_inline_limits (node->callers->caller, node,
                                              &reason, false))
                {
                  struct cgraph_node *caller = node->callers->caller;
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
                    fprintf (dump_file,
                             " Inlined into %s which now has %i size"
                             " for a net change of %+i size.\n",
                             cgraph_node_name (caller),
                             caller->global.size,
                             overall_size - old_size);
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             " Not inlining: %s.\n",
                             cgraph_inline_failed_string (reason));
                }
            }
        }
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "size %i turned to %i size.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_size,
             overall_size);
  free (order);
  return 0;
}

/* Return true when N is a leaf function.  Accept cheap (pure&const) builtins
   in leaf functions.  */
static bool
leaf_node_p (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!DECL_BUILT_IN (e->callee->decl)
        || (!TREE_READONLY (e->callee->decl)
            || DECL_PURE_P (e->callee->decl)))
      return false;
  return true;
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
                                      enum inlining_mode mode)
{
  struct cgraph_edge *e;
  bool inlined = false;
  cgraph_inline_failed_t failed_reason;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
        fprintf (dump_file, "Incrementally flattening %s\n",
                 cgraph_node_name (node));
      mode = INLINE_ALL;
    }

  /* First of all look for always inline functions.  */
  if (mode != INLINE_SIZE_NORECURSIVE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.disregard_inline_limits
            && (mode != INLINE_ALL || !e->callee->local.inlinable))
          continue;
        if (e->call_stmt_cannot_inline_p)
          continue;
        if (dump_file)
          fprintf (dump_file,
                   "Considering to always inline inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              fprintf (dump_file, "Not inlining: recursive call.\n");
            continue;
          }
        if (!tree_can_inline_p (e))
          {
            if (dump_file)
              fprintf (dump_file,
                       "Not inlining: %s",
                       cgraph_inline_failed_string (e->inline_failed));
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              fprintf (dump_file, "Not inlining: SSA form does not match.\n");
            continue;
          }
        if (!e->callee->analyzed)
          {
            if (dump_file)
              fprintf (dump_file,
                       "Not inlining: Function body no longer available.\n");
            continue;
          }

        if (dump_file)
          fprintf (dump_file, " Inlining %s into %s.\n",
                   cgraph_node_name (e->callee),
                   cgraph_node_name (e->caller));
        cgraph_mark_inline (e);
        inlined = true;
      }

  /* Now do the automatic inlining.  */
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
      /* Never inline regular functions into always-inline functions
         during incremental inlining.  */
      && !node->local.disregard_inline_limits)
    {
      bitmap visited = BITMAP_ALLOC (NULL);
      for (e = node->callees; e; e = e->next_callee)
        {
          int allowed_growth = 0;
          if (!e->callee->local.inlinable
              || !e->inline_failed
              || e->callee->local.disregard_inline_limits)
            continue;
          /* We are inlining a function to all call-sites in node
             or to none.  So visit each candidate only once.  */
          if (!bitmap_set_bit (visited, e->callee->uid))
            continue;
          if (dump_file)
            fprintf (dump_file, "Considering inline candidate %s.\n",
                     cgraph_node_name (e->callee));
          if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, "Not inlining: recursive call.\n");
              continue;
            }
          if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
              != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Not inlining: SSA form does not match.\n");
              continue;
            }

          if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
              && optimize_function_for_speed_p (cfun))
            allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);

          /* When the function body would grow and inlining the function
             won't eliminate the need for offline copy of the function,
             don't inline.  */
          if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
               || (!flag_inline_functions
                   && !DECL_DECLARED_INLINE_P (e->callee->decl)))
              && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                  > e->caller->global.size + allowed_growth)
              && cgraph_estimate_growth (e->callee) > allowed_growth)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Not inlining: code size would grow by %i.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
                         - e->caller->global.size);
              continue;
            }
          if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                           false)
              || e->call_stmt_cannot_inline_p)
            {
              if (dump_file)
                fprintf (dump_file, "Not inlining: %s.\n",
                         cgraph_inline_failed_string (e->inline_failed));
              continue;
            }
          if (!e->callee->analyzed)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              continue;
            }
          if (!tree_can_inline_p (e))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Not inlining: %s.",
                         cgraph_inline_failed_string (e->inline_failed));
              continue;
            }
          if (cgraph_default_inline_p (e->callee, &failed_reason))
            {
              if (dump_file)
                fprintf (dump_file, " Inlining %s into %s.\n",
                         cgraph_node_name (e->callee),
                         cgraph_node_name (e->caller));
              cgraph_mark_inline (e);
              inlined = true;
            }
        }
      BITMAP_FREE (visited);
    }
  return inlined;
}

/* Because inlining might remove no-longer reachable nodes, we need to
   keep the array visible to garbage collector to avoid reading collected
   out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   later real inlining pass for testcases with very many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;
  int iterations = 0;

  if (seen_error ())
    return 0;

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining)
    {
      /* When not optimizing or not inlining, inline only always-inline
         functions.  */
      cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  else
    {
      if (lookup_attribute ("flatten",
                            DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Flattening %s\n", cgraph_node_name (node));
          cgraph_flatten (node);
          timevar_push (TV_INTEGRATION);
          todo |= optimize_inline_calls (current_function_decl);
          timevar_pop (TV_INTEGRATION);
        }
      /* We iterate incremental inlining to get trivial cases of indirect
         inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
             && cgraph_decide_inlining_incrementally (node,
                                                      iterations
                                                      ? INLINE_SIZE_NORECURSIVE
                                                      : INLINE_SIZE))
        {
          timevar_push (TV_INTEGRATION);
          todo |= optimize_inline_calls (current_function_decl);
          iterations++;
          timevar_pop (TV_INTEGRATION);
        }
      if (dump_file)
        fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}

struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  NULL,					/* gate */
  cgraph_early_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};

/* See if a statement might disappear after inlining.  We are not terribly
   sophisticated here, basically just looking for simple abstraction
   penalty wrappers; see the editorial example following this function.  */

static bool
likely_eliminated_by_inlining_p (gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  switch (code)
    {
    case GIMPLE_RETURN:
      return true;
    case GIMPLE_ASSIGN:
      if (gimple_num_ops (stmt) != 2)
	return false;

      /* Casts of parameters, loads from parameters passed by reference
	 and stores to return value or parameters are probably free after
	 inlining.  */
      if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
	  || gimple_assign_rhs_code (stmt) == NOP_EXPR
	  || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
	  || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree lhs = gimple_assign_lhs (stmt);
	  tree inner_rhs = rhs;
	  tree inner_lhs = lhs;
	  bool rhs_free = false;
	  bool lhs_free = false;

	  while (handled_component_p (inner_lhs)
		 || TREE_CODE (inner_lhs) == MEM_REF)
	    inner_lhs = TREE_OPERAND (inner_lhs, 0);
	  while (handled_component_p (inner_rhs)
		 || TREE_CODE (inner_rhs) == ADDR_EXPR
		 || TREE_CODE (inner_rhs) == MEM_REF)
	    inner_rhs = TREE_OPERAND (inner_rhs, 0);

	  if (TREE_CODE (inner_rhs) == PARM_DECL
	      || (TREE_CODE (inner_rhs) == SSA_NAME
		  && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
		  && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
	    rhs_free = true;
	  if (rhs_free && is_gimple_reg (lhs))
	    lhs_free = true;
	  if (((TREE_CODE (inner_lhs) == PARM_DECL
		|| (TREE_CODE (inner_lhs) == SSA_NAME
		    && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
		    && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
	       && inner_lhs != lhs)
	      || TREE_CODE (inner_lhs) == RESULT_DECL
	      || (TREE_CODE (inner_lhs) == SSA_NAME
		  && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
	    lhs_free = true;
	  if (lhs_free
	      && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
	    rhs_free = true;
	  if (lhs_free && rhs_free)
	    return true;
	}
      return false;
    default:
      return false;
    }
}
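/* Editorial example (not part of the original source): the kind of
   abstraction penalty wrapper the predicate above targets.  For a
   hypothetical accessor

       static int get_x (struct point *p) { return p->x; }

   the body is roughly the two GIMPLE statements

       D.1 = p->x;    <- load through the PARM_DECL P: RHS counts as free
       return D.1;    <- GIMPLE_RETURN: always counts as eliminable

   so nearly the whole body counts as inlining benefit, which is what
   makes such wrappers attractive to inline.  */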
/* Compute function body size parameters for NODE.  */

static void
estimate_function_body_sizes (struct cgraph_node *node)
{
  gcov_type time = 0;
  gcov_type time_inlining_benefit = 0;
  int size = 0;
  int size_inlining_benefit = 0;
  basic_block bb;
  gimple_stmt_iterator bsi;
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  tree arg;
  int freq;
  tree funtype = TREE_TYPE (node->decl);

  if (dump_file)
    fprintf (dump_file, "Analyzing function body size: %s\n",
	     cgraph_node_name (node));

  gcc_assert (my_function && my_function->cfg);
  FOR_EACH_BB_FN (bb, my_function)
    {
      freq = compute_call_stmt_bb_frequency (node->decl, bb);
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);
	  int this_size = estimate_num_insns (stmt, &eni_size_weights);
	  int this_time = estimate_num_insns (stmt, &eni_time_weights);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  freq:%6i size:%3i time:%3i ",
		       freq, this_size, this_time);
	      print_gimple_stmt (dump_file, stmt, 0, 0);
	    }
	  this_time *= freq;
	  time += this_time;
	  size += this_size;
	  if (likely_eliminated_by_inlining_p (stmt))
	    {
	      size_inlining_benefit += this_size;
	      time_inlining_benefit += this_time;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "    Likely eliminated\n");
	    }
	  gcc_assert (time >= 0);
	  gcc_assert (size >= 0);
	}
    }
  time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
  time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
			   / CGRAPH_FREQ_BASE);
  if (dump_file)
    fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
	     (int) time, (int) time_inlining_benefit,
	     size, size_inlining_benefit);
  time_inlining_benefit += eni_time_weights.call_cost;
  size_inlining_benefit += eni_size_weights.call_cost;
  if (!VOID_TYPE_P (TREE_TYPE (funtype)))
    {
      int cost = estimate_move_cost (TREE_TYPE (funtype));
      time_inlining_benefit += cost;
      size_inlining_benefit += cost;
    }
  for (arg = DECL_ARGUMENTS (node->decl); arg; arg = DECL_CHAIN (arg))
    if (!VOID_TYPE_P (TREE_TYPE (arg)))
      {
	int cost = estimate_move_cost (TREE_TYPE (arg));
	time_inlining_benefit += cost;
	size_inlining_benefit += cost;
      }
  if (time_inlining_benefit > MAX_TIME)
    time_inlining_benefit = MAX_TIME;
  if (time > MAX_TIME)
    time = MAX_TIME;
  inline_summary (node)->self_time = time;
  inline_summary (node)->self_size = size;
  if (dump_file)
    fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
	     (int) time, (int) time_inlining_benefit,
	     size, size_inlining_benefit);
  inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
  inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
}
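/* Editorial note (not part of the original source): a worked example of
   the accounting above, assuming CGRAPH_FREQ_BASE is 1000.  A statement
   with estimated time 2 sitting in a block with frequency 1500 (i.e.
   executed about 1.5 times per invocation) contributes 2 * 1500 = 3000
   to TIME; the rounding division (3000 + 500) / 1000 then yields an
   overall time of 3.  The call_cost and estimate_move_cost additions
   account for the call itself plus the argument and return-value copies
   that disappear entirely once the call is inlined.  */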
/* Compute parameters of functions used by inliner.  */

unsigned int
compute_inline_parameters (struct cgraph_node *node)
{
  HOST_WIDE_INT self_stack_size;

  gcc_assert (!node->global.inlined_to);

  /* Estimate the stack size for the function, but not at -O0, because
     estimated_stack_frame_size is a quadratic problem.  */
  self_stack_size = optimize ? estimated_stack_frame_size (node->decl) : 0;
  inline_summary (node)->estimated_self_stack_size = self_stack_size;
  node->global.estimated_stack_size = self_stack_size;
  node->global.stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  node->local.inlinable = tree_inlinable_function_p (node->decl);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (node->decl);
  estimate_function_body_sizes (node);
  /* Inlining characteristics are maintained by cgraph_mark_inline.  */
  node->global.time = inline_summary (node)->self_time;
  node->global.size = inline_summary (node)->self_size;
  return 0;
}
/* Compute parameters of functions used by inliner using
   current_function_decl.  */

static unsigned int
compute_inline_parameters_for_current (void)
{
  compute_inline_parameters (cgraph_node (current_function_decl));
  return 0;
}
struct gimple_opt_pass pass_inline_parameters =
{
 {
  GIMPLE_PASS,
  "inline_param",			/* name */
  NULL,					/* gate */
  compute_inline_parameters_for_current, /* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};

/* This function performs the intraprocedural analysis of NODE that is
   required to inline indirect calls.  */

static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  ipa_analyze_node (node);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}
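/* Editorial note (not part of the original source): reusing the
   hypothetical APPLY/ADD example given after cgraph_early_inlining,
   ipa_analyze_node records a "jump function" describing what is known
   about each argument of each call site; for the call apply (add, 2, 3)
   it notes that argument 0 is the known constant &add, which is what
   later allows the indirect call inside APPLY to be turned into a
   direct, inlinable call to ADD.  */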
/* Note function body size.  */

static void
analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  current_function_decl = node->decl;

  compute_inline_parameters (node);
  /* FIXME: We should remove the optimize check after we ensure we never run
     IPA passes when not optimizing.  */
  if (flag_indirect_inlining && optimize)
    inline_indirect_intraprocedural_analysis (node);

  current_function_decl = NULL;
  pop_cfun ();
}
/* Called when a new function is inserted into the callgraph late.  */

static void
add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  analyze_function (node);
}
/* Generate the inline summary for every analyzed function in the
   callgraph.  */

static void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);

  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      ipa_check_create_node_params ();
      ipa_check_create_edge_args ();
    }

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      analyze_function (node);

  return;
}
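/* Editorial note (not part of the original source): the function
   insertion hook registered above ensures that bodies added to the
   callgraph after the walk over cgraph_nodes, for instance clones
   materialized by later IPA passes, also get their inline parameters
   computed.  */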
/* Apply inline plan to function.  */

static unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;
  bool inline_p = false;

  /* FIXME: Currently the pass manager adds the inline transform more than
     once to some clones.  This needs revisiting after WPA cleanups.  */
  if (cfun->after_inlining)
    return 0;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (node->decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    {
      cgraph_redirect_edge_call_stmt_to_callee (e);
      if (!e->inline_failed || warn_inline)
	inline_p = true;
    }

  if (inline_p)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  return todo | execute_fixup_cfg ();
}
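/* Editorial note (not part of the original source): execute_fixup_cfg is
   included in the return value unconditionally because inlined bodies
   can change facts the CFG relies on; e.g. when the inlined code ends in
   a call that cannot return, the statements after the original call site
   become unreachable, so the control flow graph has to be cleaned up
   even when no other optimization was requested.  */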
/* Read the inline summary.  Jump functions are shared between ipa-cp
   and the inliner, so when ipa-cp is active, we don't need to read them
   twice.  */

static void
inline_read_summary (void)
{
  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      if (!flag_ipa_cp)
	ipa_prop_read_jump_functions ();
    }
  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);
}
/* Write the inline summary for the nodes in SET.
   Jump functions are shared between ipa-cp and the inliner, so when
   ipa-cp is active, we don't need to write them twice.  */

static void
inline_write_summary (cgraph_node_set set,
		      varpool_node_set vset ATTRIBUTE_UNUSED)
{
  if (flag_indirect_inlining && !flag_ipa_cp)
    ipa_prop_write_jump_functions (set);
}
/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.  */

static bool
gate_cgraph_decide_inlining (void)
{
  /* ??? We'd like to skip this if not optimizing or not inlining as
     all always-inline functions have been processed by early
     inlining already.  But this at least breaks EH with C++ as
     we need to unconditionally run fixup_cfg even at -O0.
     So leave it on unconditionally for now.  */
  return true;
}
struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",				/* name */
  gate_cgraph_decide_inlining,		/* gate */
  cgraph_decide_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
 },
 inline_generate_summary,		/* generate_summary */
 inline_write_summary,			/* write_summary */
 inline_read_summary,			/* read_summary */
 NULL,					/* write_optimization_summary */
 NULL,					/* read_optimization_summary */
 NULL,					/* stmt_fixup */
 0,					/* TODOs */
 inline_transform,			/* function_transform */
 NULL,					/* variable_transform */
};

#include "gt-ipa-inline.h"